[{"data":1,"prerenderedAt":2734},["ShallowReactive",2],{"/en-us/blog/tags/cloud-native":3,"navigation-en-us":20,"banner-en-us":449,"footer-en-us":466,"footer-source-/en-us/blog/tags/cloud-native/":709,"cloud native-tag-page-en-us":712},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"content":8,"config":11,"_id":13,"_type":14,"title":15,"_source":16,"_file":17,"_stem":18,"_extension":19},"/en-us/blog/tags/cloud-native","tags",false,"",{"tag":9,"tagSlug":10},"cloud native","cloud-native",{"template":12},"BlogTag","content:en-us:blog:tags:cloud-native.yml","yaml","Cloud Native","content","en-us/blog/tags/cloud-native.yml","en-us/blog/tags/cloud-native","yml",{"_path":21,"_dir":22,"_draft":6,"_partial":6,"_locale":7,"data":23,"_id":445,"_type":14,"title":446,"_source":16,"_file":447,"_stem":448,"_extension":19},"/shared/en-us/main-navigation","en-us",{"logo":24,"freeTrial":29,"sales":34,"login":39,"items":44,"search":376,"minimal":407,"duo":426,"pricingDeployment":435},{"config":25},{"href":26,"dataGaName":27,"dataGaLocation":28},"/","gitlab logo","header",{"text":30,"config":31},"Get free trial",{"href":32,"dataGaName":33,"dataGaLocation":28},"https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com&glm_content=default-saas-trial/","free trial",{"text":35,"config":36},"Talk to sales",{"href":37,"dataGaName":38,"dataGaLocation":28},"/sales/","sales",{"text":40,"config":41},"Sign in",{"href":42,"dataGaName":43,"dataGaLocation":28},"https://gitlab.com/users/sign_in/","sign in",[45,89,186,191,297,357],{"text":46,"config":47,"cards":49,"footer":72},"Platform",{"dataNavLevelOne":48},"platform",[50,56,64],{"title":46,"description":51,"link":52},"The most comprehensive AI-powered DevSecOps Platform",{"text":53,"config":54},"Explore our Platform",{"href":55,"dataGaName":48,"dataGaLocation":28},"/platform/",{"title":57,"description":58,"link":59},"GitLab Duo (AI)","Build software faster with AI at every stage of 
development",{"text":60,"config":61},"Meet GitLab Duo",{"href":62,"dataGaName":63,"dataGaLocation":28},"/gitlab-duo/","gitlab duo ai",{"title":65,"description":66,"link":67},"Why GitLab","10 reasons why Enterprises choose GitLab",{"text":68,"config":69},"Learn more",{"href":70,"dataGaName":71,"dataGaLocation":28},"/why-gitlab/","why gitlab",{"title":73,"items":74},"Get started with",[75,80,85],{"text":76,"config":77},"Platform Engineering",{"href":78,"dataGaName":79,"dataGaLocation":28},"/solutions/platform-engineering/","platform engineering",{"text":81,"config":82},"Developer Experience",{"href":83,"dataGaName":84,"dataGaLocation":28},"/developer-experience/","Developer experience",{"text":86,"config":87},"MLOps",{"href":88,"dataGaName":86,"dataGaLocation":28},"/topics/devops/the-role-of-ai-in-devops/",{"text":90,"left":91,"config":92,"link":94,"lists":98,"footer":168},"Product",true,{"dataNavLevelOne":93},"solutions",{"text":95,"config":96},"View all Solutions",{"href":97,"dataGaName":93,"dataGaLocation":28},"/solutions/",[99,124,147],{"title":100,"description":101,"link":102,"items":107},"Automation","CI/CD and automation to accelerate deployment",{"config":103},{"icon":104,"href":105,"dataGaName":106,"dataGaLocation":28},"AutomatedCodeAlt","/solutions/delivery-automation/","automated software delivery",[108,112,116,120],{"text":109,"config":110},"CI/CD",{"href":111,"dataGaLocation":28,"dataGaName":109},"/solutions/continuous-integration/",{"text":113,"config":114},"AI-Assisted Development",{"href":62,"dataGaLocation":28,"dataGaName":115},"AI assisted development",{"text":117,"config":118},"Source Code Management",{"href":119,"dataGaLocation":28,"dataGaName":117},"/solutions/source-code-management/",{"text":121,"config":122},"Automated Software Delivery",{"href":105,"dataGaLocation":28,"dataGaName":123},"Automated software delivery",{"title":125,"description":126,"link":127,"items":132},"Security","Deliver code faster without compromising 
security",{"config":128},{"href":129,"dataGaName":130,"dataGaLocation":28,"icon":131},"/solutions/application-security-testing/","security and compliance","ShieldCheckLight",[133,137,142],{"text":134,"config":135},"Application Security Testing",{"href":129,"dataGaName":136,"dataGaLocation":28},"Application security testing",{"text":138,"config":139},"Software Supply Chain Security",{"href":140,"dataGaLocation":28,"dataGaName":141},"/solutions/supply-chain/","Software supply chain security",{"text":143,"config":144},"Software Compliance",{"href":145,"dataGaName":146,"dataGaLocation":28},"/solutions/software-compliance/","software compliance",{"title":148,"link":149,"items":154},"Measurement",{"config":150},{"icon":151,"href":152,"dataGaName":153,"dataGaLocation":28},"DigitalTransformation","/solutions/visibility-measurement/","visibility and measurement",[155,159,163],{"text":156,"config":157},"Visibility & Measurement",{"href":152,"dataGaLocation":28,"dataGaName":158},"Visibility and Measurement",{"text":160,"config":161},"Value Stream Management",{"href":162,"dataGaLocation":28,"dataGaName":160},"/solutions/value-stream-management/",{"text":164,"config":165},"Analytics & Insights",{"href":166,"dataGaLocation":28,"dataGaName":167},"/solutions/analytics-and-insights/","Analytics and insights",{"title":169,"items":170},"GitLab for",[171,176,181],{"text":172,"config":173},"Enterprise",{"href":174,"dataGaLocation":28,"dataGaName":175},"/enterprise/","enterprise",{"text":177,"config":178},"Small Business",{"href":179,"dataGaLocation":28,"dataGaName":180},"/small-business/","small business",{"text":182,"config":183},"Public Sector",{"href":184,"dataGaLocation":28,"dataGaName":185},"/solutions/public-sector/","public 
sector",{"text":187,"config":188},"Pricing",{"href":189,"dataGaName":190,"dataGaLocation":28,"dataNavLevelOne":190},"/pricing/","pricing",{"text":192,"config":193,"link":195,"lists":199,"feature":284},"Resources",{"dataNavLevelOne":194},"resources",{"text":196,"config":197},"View all resources",{"href":198,"dataGaName":194,"dataGaLocation":28},"/resources/",[200,233,256],{"title":201,"items":202},"Getting started",[203,208,213,218,223,228],{"text":204,"config":205},"Install",{"href":206,"dataGaName":207,"dataGaLocation":28},"/install/","install",{"text":209,"config":210},"Quick start guides",{"href":211,"dataGaName":212,"dataGaLocation":28},"/get-started/","quick setup checklists",{"text":214,"config":215},"Learn",{"href":216,"dataGaLocation":28,"dataGaName":217},"https://university.gitlab.com/","learn",{"text":219,"config":220},"Product documentation",{"href":221,"dataGaName":222,"dataGaLocation":28},"https://docs.gitlab.com/","product documentation",{"text":224,"config":225},"Best practice videos",{"href":226,"dataGaName":227,"dataGaLocation":28},"/getting-started-videos/","best practice videos",{"text":229,"config":230},"Integrations",{"href":231,"dataGaName":232,"dataGaLocation":28},"/integrations/","integrations",{"title":234,"items":235},"Discover",[236,241,246,251],{"text":237,"config":238},"Customer success stories",{"href":239,"dataGaName":240,"dataGaLocation":28},"/customers/","customer success stories",{"text":242,"config":243},"Blog",{"href":244,"dataGaName":245,"dataGaLocation":28},"/blog/","blog",{"text":247,"config":248},"Remote",{"href":249,"dataGaName":250,"dataGaLocation":28},"https://handbook.gitlab.com/handbook/company/culture/all-remote/","remote",{"text":252,"config":253},"TeamOps",{"href":254,"dataGaName":255,"dataGaLocation":28},"/teamops/","teamops",{"title":257,"items":258},"Connect",[259,264,269,274,279],{"text":260,"config":261},"GitLab 
Services",{"href":262,"dataGaName":263,"dataGaLocation":28},"/services/","services",{"text":265,"config":266},"Community",{"href":267,"dataGaName":268,"dataGaLocation":28},"/community/","community",{"text":270,"config":271},"Forum",{"href":272,"dataGaName":273,"dataGaLocation":28},"https://forum.gitlab.com/","forum",{"text":275,"config":276},"Events",{"href":277,"dataGaName":278,"dataGaLocation":28},"/events/","events",{"text":280,"config":281},"Partners",{"href":282,"dataGaName":283,"dataGaLocation":28},"/partners/","partners",{"backgroundColor":285,"textColor":286,"text":287,"image":288,"link":292},"#2f2a6b","#fff","Insights for the future of software development",{"altText":289,"config":290},"the source promo card",{"src":291},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1758208064/dzl0dbift9xdizyelkk4.svg",{"text":293,"config":294},"Read the latest",{"href":295,"dataGaName":296,"dataGaLocation":28},"/the-source/","the source",{"text":298,"config":299,"lists":301},"Company",{"dataNavLevelOne":300},"company",[302],{"items":303},[304,309,315,317,322,327,332,337,342,347,352],{"text":305,"config":306},"About",{"href":307,"dataGaName":308,"dataGaLocation":28},"/company/","about",{"text":310,"config":311,"footerGa":314},"Jobs",{"href":312,"dataGaName":313,"dataGaLocation":28},"/jobs/","jobs",{"dataGaName":313},{"text":275,"config":316},{"href":277,"dataGaName":278,"dataGaLocation":28},{"text":318,"config":319},"Leadership",{"href":320,"dataGaName":321,"dataGaLocation":28},"/company/team/e-group/","leadership",{"text":323,"config":324},"Team",{"href":325,"dataGaName":326,"dataGaLocation":28},"/company/team/","team",{"text":328,"config":329},"Handbook",{"href":330,"dataGaName":331,"dataGaLocation":28},"https://handbook.gitlab.com/","handbook",{"text":333,"config":334},"Investor relations",{"href":335,"dataGaName":336,"dataGaLocation":28},"https://ir.gitlab.com/","investor relations",{"text":338,"config":339},"Trust 
Center",{"href":340,"dataGaName":341,"dataGaLocation":28},"/security/","trust center",{"text":343,"config":344},"AI Transparency Center",{"href":345,"dataGaName":346,"dataGaLocation":28},"/ai-transparency-center/","ai transparency center",{"text":348,"config":349},"Newsletter",{"href":350,"dataGaName":351,"dataGaLocation":28},"/company/contact/","newsletter",{"text":353,"config":354},"Press",{"href":355,"dataGaName":356,"dataGaLocation":28},"/press/","press",{"text":358,"config":359,"lists":360},"Contact us",{"dataNavLevelOne":300},[361],{"items":362},[363,366,371],{"text":35,"config":364},{"href":37,"dataGaName":365,"dataGaLocation":28},"talk to sales",{"text":367,"config":368},"Get help",{"href":369,"dataGaName":370,"dataGaLocation":28},"/support/","get help",{"text":372,"config":373},"Customer portal",{"href":374,"dataGaName":375,"dataGaLocation":28},"https://customers.gitlab.com/customers/sign_in/","customer portal",{"close":377,"login":378,"suggestions":385},"Close",{"text":379,"link":380},"To search repositories and projects, login to",{"text":381,"config":382},"gitlab.com",{"href":42,"dataGaName":383,"dataGaLocation":384},"search login","search",{"text":386,"default":387},"Suggestions",[388,390,394,396,400,404],{"text":57,"config":389},{"href":62,"dataGaName":57,"dataGaLocation":384},{"text":391,"config":392},"Code Suggestions (AI)",{"href":393,"dataGaName":391,"dataGaLocation":384},"/solutions/code-suggestions/",{"text":109,"config":395},{"href":111,"dataGaName":109,"dataGaLocation":384},{"text":397,"config":398},"GitLab on AWS",{"href":399,"dataGaName":397,"dataGaLocation":384},"/partners/technology-partners/aws/",{"text":401,"config":402},"GitLab on Google Cloud",{"href":403,"dataGaName":401,"dataGaLocation":384},"/partners/technology-partners/google-cloud-platform/",{"text":405,"config":406},"Why 
GitLab?",{"href":70,"dataGaName":405,"dataGaLocation":384},{"freeTrial":408,"mobileIcon":413,"desktopIcon":418,"secondaryButton":421},{"text":409,"config":410},"Start free trial",{"href":411,"dataGaName":33,"dataGaLocation":412},"https://gitlab.com/-/trials/new/","nav",{"altText":414,"config":415},"Gitlab Icon",{"src":416,"dataGaName":417,"dataGaLocation":412},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1758203874/jypbw1jx72aexsoohd7x.svg","gitlab icon",{"altText":414,"config":419},{"src":420,"dataGaName":417,"dataGaLocation":412},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1758203875/gs4c8p8opsgvflgkswz9.svg",{"text":422,"config":423},"Get Started",{"href":424,"dataGaName":425,"dataGaLocation":412},"https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com/compare/gitlab-vs-github/","get started",{"freeTrial":427,"mobileIcon":431,"desktopIcon":433},{"text":428,"config":429},"Learn more about GitLab Duo",{"href":62,"dataGaName":430,"dataGaLocation":412},"gitlab duo",{"altText":414,"config":432},{"src":416,"dataGaName":417,"dataGaLocation":412},{"altText":414,"config":434},{"src":420,"dataGaName":417,"dataGaLocation":412},{"freeTrial":436,"mobileIcon":441,"desktopIcon":443},{"text":437,"config":438},"Back to pricing",{"href":189,"dataGaName":439,"dataGaLocation":412,"icon":440},"back to pricing","GoBack",{"altText":414,"config":442},{"src":416,"dataGaName":417,"dataGaLocation":412},{"altText":414,"config":444},{"src":420,"dataGaName":417,"dataGaLocation":412},"content:shared:en-us:main-navigation.yml","Main Navigation","shared/en-us/main-navigation.yml","shared/en-us/main-navigation",{"_path":450,"_dir":22,"_draft":6,"_partial":6,"_locale":7,"title":451,"button":452,"image":457,"config":461,"_id":463,"_type":14,"_source":16,"_file":464,"_stem":465,"_extension":19},"/shared/en-us/banner","is now in public beta!",{"text":453,"config":454},"Try the 
Beta",{"href":455,"dataGaName":456,"dataGaLocation":28},"/gitlab-duo/agent-platform/","duo banner",{"altText":458,"config":459},"GitLab Duo Agent Platform",{"src":460},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1753720689/somrf9zaunk0xlt7ne4x.svg",{"layout":462},"release","content:shared:en-us:banner.yml","shared/en-us/banner.yml","shared/en-us/banner",{"_path":467,"_dir":22,"_draft":6,"_partial":6,"_locale":7,"data":468,"_id":705,"_type":14,"title":706,"_source":16,"_file":707,"_stem":708,"_extension":19},"/shared/en-us/main-footer",{"text":469,"source":470,"edit":476,"contribute":481,"config":486,"items":491,"minimal":697},"Git is a trademark of Software Freedom Conservancy and our use of 'GitLab' is under license",{"text":471,"config":472},"View page source",{"href":473,"dataGaName":474,"dataGaLocation":475},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/","page source","footer",{"text":477,"config":478},"Edit this page",{"href":479,"dataGaName":480,"dataGaLocation":475},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/content/","web ide",{"text":482,"config":483},"Please contribute",{"href":484,"dataGaName":485,"dataGaLocation":475},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/CONTRIBUTING.md/","please contribute",{"twitter":487,"facebook":488,"youtube":489,"linkedin":490},"https://twitter.com/gitlab","https://www.facebook.com/gitlab","https://www.youtube.com/channel/UCnMGQ8QHMAnVIsI3xJrihhg","https://www.linkedin.com/company/gitlab-com",[492,539,590,634,663],{"title":187,"links":493,"subMenu":508},[494,498,503],{"text":495,"config":496},"View plans",{"href":189,"dataGaName":497,"dataGaLocation":475},"view plans",{"text":499,"config":500},"Why Premium?",{"href":501,"dataGaName":502,"dataGaLocation":475},"/pricing/premium/","why premium",{"text":504,"config":505},"Why 
Ultimate?",{"href":506,"dataGaName":507,"dataGaLocation":475},"/pricing/ultimate/","why ultimate",[509],{"title":510,"links":511},"Contact Us",[512,515,517,519,524,529,534],{"text":513,"config":514},"Contact sales",{"href":37,"dataGaName":38,"dataGaLocation":475},{"text":367,"config":516},{"href":369,"dataGaName":370,"dataGaLocation":475},{"text":372,"config":518},{"href":374,"dataGaName":375,"dataGaLocation":475},{"text":520,"config":521},"Status",{"href":522,"dataGaName":523,"dataGaLocation":475},"https://status.gitlab.com/","status",{"text":525,"config":526},"Terms of use",{"href":527,"dataGaName":528,"dataGaLocation":475},"/terms/","terms of use",{"text":530,"config":531},"Privacy statement",{"href":532,"dataGaName":533,"dataGaLocation":475},"/privacy/","privacy statement",{"text":535,"config":536},"Cookie preferences",{"dataGaName":537,"dataGaLocation":475,"id":538,"isOneTrustButton":91},"cookie preferences","ot-sdk-btn",{"title":90,"links":540,"subMenu":548},[541,545],{"text":542,"config":543},"DevSecOps platform",{"href":55,"dataGaName":544,"dataGaLocation":475},"devsecops platform",{"text":113,"config":546},{"href":62,"dataGaName":547,"dataGaLocation":475},"ai-assisted development",[549],{"title":550,"links":551},"Topics",[552,557,562,567,572,577,580,585],{"text":553,"config":554},"CICD",{"href":555,"dataGaName":556,"dataGaLocation":475},"/topics/ci-cd/","cicd",{"text":558,"config":559},"GitOps",{"href":560,"dataGaName":561,"dataGaLocation":475},"/topics/gitops/","gitops",{"text":563,"config":564},"DevOps",{"href":565,"dataGaName":566,"dataGaLocation":475},"/topics/devops/","devops",{"text":568,"config":569},"Version Control",{"href":570,"dataGaName":571,"dataGaLocation":475},"/topics/version-control/","version 
control",{"text":573,"config":574},"DevSecOps",{"href":575,"dataGaName":576,"dataGaLocation":475},"/topics/devsecops/","devsecops",{"text":15,"config":578},{"href":579,"dataGaName":9,"dataGaLocation":475},"/topics/cloud-native/",{"text":581,"config":582},"AI for Coding",{"href":583,"dataGaName":584,"dataGaLocation":475},"/topics/devops/ai-for-coding/","ai for coding",{"text":586,"config":587},"Agentic AI",{"href":588,"dataGaName":589,"dataGaLocation":475},"/topics/agentic-ai/","agentic ai",{"title":591,"links":592},"Solutions",[593,595,597,602,606,609,613,616,618,621,624,629],{"text":134,"config":594},{"href":129,"dataGaName":134,"dataGaLocation":475},{"text":123,"config":596},{"href":105,"dataGaName":106,"dataGaLocation":475},{"text":598,"config":599},"Agile development",{"href":600,"dataGaName":601,"dataGaLocation":475},"/solutions/agile-delivery/","agile delivery",{"text":603,"config":604},"SCM",{"href":119,"dataGaName":605,"dataGaLocation":475},"source code management",{"text":553,"config":607},{"href":111,"dataGaName":608,"dataGaLocation":475},"continuous integration & delivery",{"text":610,"config":611},"Value stream management",{"href":162,"dataGaName":612,"dataGaLocation":475},"value stream management",{"text":558,"config":614},{"href":615,"dataGaName":561,"dataGaLocation":475},"/solutions/gitops/",{"text":172,"config":617},{"href":174,"dataGaName":175,"dataGaLocation":475},{"text":619,"config":620},"Small business",{"href":179,"dataGaName":180,"dataGaLocation":475},{"text":622,"config":623},"Public sector",{"href":184,"dataGaName":185,"dataGaLocation":475},{"text":625,"config":626},"Education",{"href":627,"dataGaName":628,"dataGaLocation":475},"/solutions/education/","education",{"text":630,"config":631},"Financial services",{"href":632,"dataGaName":633,"dataGaLocation":475},"/solutions/finance/","financial 
services",{"title":192,"links":635},[636,638,640,642,645,647,649,651,653,655,657,659,661],{"text":204,"config":637},{"href":206,"dataGaName":207,"dataGaLocation":475},{"text":209,"config":639},{"href":211,"dataGaName":212,"dataGaLocation":475},{"text":214,"config":641},{"href":216,"dataGaName":217,"dataGaLocation":475},{"text":219,"config":643},{"href":221,"dataGaName":644,"dataGaLocation":475},"docs",{"text":242,"config":646},{"href":244,"dataGaName":245,"dataGaLocation":475},{"text":237,"config":648},{"href":239,"dataGaName":240,"dataGaLocation":475},{"text":247,"config":650},{"href":249,"dataGaName":250,"dataGaLocation":475},{"text":260,"config":652},{"href":262,"dataGaName":263,"dataGaLocation":475},{"text":252,"config":654},{"href":254,"dataGaName":255,"dataGaLocation":475},{"text":265,"config":656},{"href":267,"dataGaName":268,"dataGaLocation":475},{"text":270,"config":658},{"href":272,"dataGaName":273,"dataGaLocation":475},{"text":275,"config":660},{"href":277,"dataGaName":278,"dataGaLocation":475},{"text":280,"config":662},{"href":282,"dataGaName":283,"dataGaLocation":475},{"title":298,"links":664},[665,667,669,671,673,675,677,681,686,688,690,692],{"text":305,"config":666},{"href":307,"dataGaName":300,"dataGaLocation":475},{"text":310,"config":668},{"href":312,"dataGaName":313,"dataGaLocation":475},{"text":318,"config":670},{"href":320,"dataGaName":321,"dataGaLocation":475},{"text":323,"config":672},{"href":325,"dataGaName":326,"dataGaLocation":475},{"text":328,"config":674},{"href":330,"dataGaName":331,"dataGaLocation":475},{"text":333,"config":676},{"href":335,"dataGaName":336,"dataGaLocation":475},{"text":678,"config":679},"Sustainability",{"href":680,"dataGaName":678,"dataGaLocation":475},"/sustainability/",{"text":682,"config":683},"Diversity, inclusion and belonging (DIB)",{"href":684,"dataGaName":685,"dataGaLocation":475},"/diversity-inclusion-belonging/","Diversity, inclusion and 
belonging",{"text":338,"config":687},{"href":340,"dataGaName":341,"dataGaLocation":475},{"text":348,"config":689},{"href":350,"dataGaName":351,"dataGaLocation":475},{"text":353,"config":691},{"href":355,"dataGaName":356,"dataGaLocation":475},{"text":693,"config":694},"Modern Slavery Transparency Statement",{"href":695,"dataGaName":696,"dataGaLocation":475},"https://handbook.gitlab.com/handbook/legal/modern-slavery-act-transparency-statement/","modern slavery transparency statement",{"items":698},[699,701,703],{"text":525,"config":700},{"href":527,"dataGaName":528,"dataGaLocation":475},{"text":530,"config":702},{"href":532,"dataGaName":533,"dataGaLocation":475},{"text":535,"config":704},{"dataGaName":537,"dataGaLocation":475,"id":538,"isOneTrustButton":91},"content:shared:en-us:main-footer.yml","Main Footer","shared/en-us/main-footer.yml","shared/en-us/main-footer",{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"content":710,"config":711,"_id":13,"_type":14,"title":15,"_source":16,"_file":17,"_stem":18,"_extension":19},{"tag":9,"tagSlug":10},{"template":12},[713,739,762,782,803,824,845,866,885,904,924,944,965,986,1006,1028,1048,1069,1089,1110,1131,1155,1175,1195,1215,1236,1258,1278,1299,1316,1335,1353,1373,1395,1415,1437,1460,1483,1503,1523,1541,1562,1582,1600,1620,1641,1662,1681,1700,1720,1739,1759,1778,1799,1818,1836,1856,1876,1898,1917,1936,1955,1973,1992,2011,2032,2050,2070,2088,2107,2128,2148,2169,2189,2211,2231,2252,2273,2294,2314,2335,2354,2373,2393,2411,2430,2449,2469,2488,2506,2525,2544,2563,2582,2602,2621,2640,2659,2677,2696,2714],{"_path":714,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":715,"content":723,"config":732,"_id":735,"_type":14,"title":736,"_source":16,"_file":737,"_stem":738,"_extension":19},"/en-us/blog/accelerate-cloud-adoption-with-gitlabs-open-source-partnership-with-google-cloud",{"title":716,"description":717,"ogTitle":716,"ogDescription":717,"noIndex":6,"ogImage":718,"ogUrl":719,"ogSiteName":720,"ogType":721,"canonicalUrl
s":719,"schema":722},"GitLab & Google Cloud partnership accelerates cloud adoption","Learn how Cloud Seed came about and how it will help speed app modernization.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665811/Blog/Hero%20Images/daytime-clouds.jpg","https://about.gitlab.com/blog/accelerate-cloud-adoption-with-gitlabs-open-source-partnership-with-google-cloud","https://about.gitlab.com","article","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Accelerate cloud adoption with GitLab's open source partnership with Google Cloud\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sri Rangan\"}],\n        \"datePublished\": \"2022-10-11\",\n      }",{"title":724,"description":717,"authors":725,"heroImage":718,"date":727,"body":728,"category":729,"tags":730},"Accelerate cloud adoption with GitLab's open source partnership with Google Cloud",[726],"Sri Rangan","2022-10-11","\nSince December 2021, GitLab Incubation has partnered with Google Cloud to develop\nsolutions that will help customers address one of their biggest business requirements: accelerating cloud adoption.\n\nWe are thrilled to announce the release of Cloud Seed at Google Cloud Next 2022,\nand we are even more excited to follow up with our community. Cloud Seed is an open\nsource partnership between GitLab and Google Cloud to accelerate cloud adoption and\napp modernization.\n\nThe origins of Cloud Seed date back to late 2020 when I worked closely with GitLab co-founder and CEO [Sid Sijbrandij]( /company/team/#sytses) on an experiment called “5 Minute Production\". Our focus was to improve developer experience while consuming cloud services and enabling DevSecOps best practices by default.\n\nFor this, GitLab needed to collaborate with the hyper clouds, and Google Cloud emerged as our natural choice. 
In this post I’d like to shed light on our collaboration, the results our partnership has achieved, and the positive business outcomes our customers will realize.\n\n## Refining the use case\n\nFirst, we reached out and polled our customers to try and understand their cloud adoption use cases. \n\nWe found the enterprise market segment focused on migrating existing systems to the cloud to achieve their digital transformation targets, while the SMB and startup segment focused on embracing the cloud for greenfield initiatives.\n\n## Cloud Run and Cloud SQL\n\nWhile motivations for enterprise and SMB segments varied, the underlying use case —– deploying web applications to the cloud —– remained the same. Thus, we selected two of the more popular Google Cloud managed services that web applications make use of: [Cloud Run](https://cloud.google.com/run) and [Cloud SQL](https://cloud.google.com/sql).\n\nCloud Run makes it possible to build and deploy scalable containerized apps written in any language (including Go, Python, Java, Node.js, .NET, and Ruby) on a fully managed platform. Meanwhile, Cloud SQL is a fully managed relational database service for MySQL, PostgreSQL, and SQL Server with rich extension collections, configuration flags, and developer ecosystems.\n\n## Open source collaboration\n\nGitLab comes with a rich tradition of [open source](/solutions/open-source/). Our partners at Google Cloud understood and complemented that remarkably, which made for a close collaboration between our two teams. 
We agreed quite early in the process that all capabilities built within Cloud Seed will be open source and, therefore, available for all GitLab users regardless of their market segment, license tier, or any other consideration.\n\n## Preview environments on Cloud Run\n\nThe Cloud Seed private beta was made available to trusted testers in May 2022, and based on the successful beta program, Preview Environments with GitLab and Cloud Run emerged among the most popular use cases.\n\n**Take a look at Preview Environments on GitLab with Cloud Seed:**\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/zDMGCyAgCPY\" title=\"Preview Environments on GitLab with Cloud Seed\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\nMost Git-based development workflows make use of temporary feature branches. In larger teams and organizations, it is required that feature branches are made available for review and testing.\n\nWith Cloud Seed, a Cloud Run deployment pipeline can be generated in less than two minutes that deploys all feature branches to Cloud Run. Given Cloud Run’s free tier, this can be a cost-effective method to deploy and manage preview environments.\n\n## Relational databases with Cloud SQL\n\nAnother common use case, typically in the app migration scenario, is to set up and migrate relational databases in the cloud. Our beta-test users voted for Cloud SQL as their most popular data storage option among a myriad of Google Cloud services.\n\nWith Cloud Seed, traditional relational databases such as Postgres, MySQL, and SQL Server can be spun up from the GitLab web UI. Similar to the Cloud Run workflow described above, these database instances can be made branch, tag, and environment specific. 
Alternatively, a GitLab project can be spun up for database operations, where Cloud Seed creates a suitable Cloud SQL instance while the Git repository serves as the host for configuration and migration operations.\n\n## Looking ahead\n\nOur purpose is clear: We learn from our users and customers about their use cases and needs, and we build capabilities to support them through their cloud adoption journeys. We are thrilled to announce the release of Cloud Seed at [Google Cloud Next '22](https://cloud.withgoogle.com/next), and we are even more excited to follow up with our community. Connect with us @OpenCloudSeed on Twitter and try out Cloud Seed today at GitLab.com.\n","open-source",[283,731,9],"open source",{"slug":733,"featured":6,"template":734},"accelerate-cloud-adoption-with-gitlabs-open-source-partnership-with-google-cloud","BlogPost","content:en-us:blog:accelerate-cloud-adoption-with-gitlabs-open-source-partnership-with-google-cloud.yml","Accelerate Cloud Adoption With Gitlabs Open Source Partnership With Google Cloud","en-us/blog/accelerate-cloud-adoption-with-gitlabs-open-source-partnership-with-google-cloud.yml","en-us/blog/accelerate-cloud-adoption-with-gitlabs-open-source-partnership-with-google-cloud",{"_path":740,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":741,"content":747,"config":756,"_id":758,"_type":14,"title":759,"_source":16,"_file":760,"_stem":761,"_extension":19},"/en-us/blog/adsoul-devops-transition-to-gitlab-ci",{"title":742,"description":743,"ogTitle":742,"ogDescription":743,"noIndex":6,"ogImage":744,"ogUrl":745,"ogSiteName":720,"ogType":721,"canonicalUrls":745,"schema":746},"How adSoul transitioned to GitLab CI from Jenkins","adSoul, a marketing automation company, outlines a successful three-phase migration plan for moving to GitLab CI from 
Jenkins.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678442/Blog/Hero%20Images/londoncommit.png","https://about.gitlab.com/blog/adsoul-devops-transition-to-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How adSoul transitioned to GitLab CI from Jenkins\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brein Matturro\"}],\n        \"datePublished\": \"2019-11-05\",\n      }",{"title":742,"description":743,"authors":748,"heroImage":744,"date":750,"body":751,"category":752,"tags":753},[749],"Brein Matturro","2019-11-05","\nadSoul is a Germany-based data-driven online marketing company that aims to improve search engine advertising and scalability for businesses. The core of adSoul relies heavily on API interfaces and entity recognition to post keywords on Google and Bing with marketing automation. \n\nAt GitLab Commit London, [Philipp Westphalen](https://www.linkedin.com/in/philipp-westphalen-a83318188/), fullstack developer at adSoul and GitLab Hero, shares how the company transitioned from Jenkins to GiLab CI. adSoul is a startup company with five developers, and as Philipp says “We literally have no time for everything we need to do.” They were looking for a tool that requires less time-consuming maintanence, and with Jenkins the team found it hard to read their existing files. “Our Jenkins was not so stable at all and it was tough to change because it was managed by our provider,” Philipp says. Cost and visibility were also huge motivators in moving away from [Jenkins to Gitlab CI](/blog/docker-my-precious/).\n\n## GitLab migration in three phases\n\nPhase 1: Move the repository.\nThe [adSoul team](https://www.adsoul.com) used the GitHub Import by GitLab, but had setbacks with migrating their issues, so they created a GitHub open source issue migrator as a resolution. 
Following that, they modified scripts with the new origin by exchanging the GitHub API call with a GitLab API. “This was really easy and we had a stable build with our new repository, so we could move our product management to GitLab and not need GitHub anymore,” Philipp says.\n\nPhase 2: Migrate the CI/CD pipeline.\nThe team started to create a GitLab CI YAML and tried to do a simple ‘lift and shift,’ however their processes were more complicated than anticipated. Though this phase was time consuming, it became clear the team could move to phase three without hiccups. “Quick pro tip,” says Philipp. “If you’re running your own GitLab runners, increase the log limit if you have to debug your building step.” \n\nPhase 3: Improve the CI/CD pipeline.\nThe team thought about ways of building their software, so they split projects into steps. “Our idea was that one job does one thing perfectly. Each job is simple and everyone can modify it easily” Philipp says. They improved their build time by moving to Gradle, created parallel job processing, and by using standard Docker images for ease of management. \n\n## Takeaways from a successful migration\n\n1. Plan your migration. Get every member of the team involved and aware of the upcoming changes, including how tools are working together and what the expectations are moving forward. “Take your time for the migration,” Philipp says. “It’s not two days and then we are finished.” \n\n2. Go step by step. adSoul used a three phase plan which allowed the team to deploy a new version and still continue to work on existing projects. “We could improve our application without having to wait for a better infrastructure,” Philipp says.\n\n3. Rethink your [DevOps strategy](/blog/better-devops-with-gitlab-ci-cd/). In the time leading up to the migration, examine things like security automation and other important pieces in a DevOps overall strategy.\n\n4. Start with a small project. 
Work closely with colleagues to create small GitLab CI projects to familiarize everyone before creating larger, overwhelming projects.\n\nPro tip: Keep your pipeline user friendly. Create a good user experience for the team with clear job names, style your config for a better overview, and write comments for variables and hard to understand code. \n\n## Why GitLab works for a small team\n\n“The most important thing is that GitLab is a powerful CI/CD solution with high customization,” Philipp says. There is one home for all projects, without dependencies on one another. With Jenkins, even small exploratory changes can impact the larger job. “With GitLab, you don’t have dependency between branches. So, if you’re trying something new for your CI, you can do it simply in your branch and the master branch will not be affected by the changes,” Philipp says.\n\nThe CI is low maintenance, which is a useful timesaver for a smaller team. “The CI provides us with really low maintenance time. So, usually we don’t have to care about our CI for a month or more,” Philipp says.\n\nTo learn more about adSoul’s migration to GitLab, watch Philipp’s talk from GitLab Commit London.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/C5xfw0ydh2k\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n","engineering",[109,563,731,754,755,9],"performance","startups",{"slug":757,"featured":6,"template":734},"adsoul-devops-transition-to-gitlab-ci","content:en-us:blog:adsoul-devops-transition-to-gitlab-ci.yml","Adsoul Devops Transition To Gitlab 
Ci","en-us/blog/adsoul-devops-transition-to-gitlab-ci.yml","en-us/blog/adsoul-devops-transition-to-gitlab-ci",{"_path":763,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":764,"content":770,"config":776,"_id":778,"_type":14,"title":779,"_source":16,"_file":780,"_stem":781,"_extension":19},"/en-us/blog/application-modernization-examples",{"title":765,"description":766,"ogTitle":765,"ogDescription":766,"noIndex":6,"ogImage":767,"ogUrl":768,"ogSiteName":720,"ogType":721,"canonicalUrls":768,"schema":769},"Examples of legacy modernisation projects","Discover how four teams committed to the application modernization process.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671258/Blog/Hero%20Images/just-commit-blog-cover.png","https://about.gitlab.com/blog/application-modernization-examples","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Examples of legacy modernisation projects\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-03-14\",\n      }",{"title":765,"description":766,"authors":771,"heroImage":767,"date":773,"body":774,"category":752,"tags":775},[772],"Chrissie Buchanan","2019-03-14","\n\nFine wine and cheese. Whiskey. Paul Rudd. There are a lot of things that get better with age – legacy systems are _not_ one of them.\n\n## The true cost of legacy systems\n\nOver time, the true cost of legacy systems is enormous: from additional resources needed to maintain them, to lost productivity, they can hinder investments in long-term growth. 
In highly regulated industries, they can even be a financial liability.\n[Health Insurance Portability and Accountability Act (HIPAA) violations in 2018 resulted in over $28 million in fines](https://compliancy-group.com/hipaa-fines-directory-year/), many of them due to data breaches.\nAs legacy systems grow older, it's [easy to miss critical security patches (if any are even available)](https://www.globalscape.com/blog/how-high-risk-legacy-systems-are-hurting-your-business), making your system more vulnerable to malicious actors ready to use old Java and SSL exploits to expose your network.\n\nEven if we can all agree that legacy system modernization is important, it still takes work.\n[Analysis paralysis is a real phenomenon in the digital transformation journey](/blog/beyond-application-modernization-trends/).\nRipping off the band-aid and committing to faster deployment feels overwhelming, and there are so many application modernization trends to consider. But not taking action puts a ceiling on growth.\n\n## Status quo \u003C Innovation\n\nMany large enterprises feel tied down to current practices because there just aren't enough resources left to innovate once legacy systems are maintained.\nFor example, [the greater part of the IT-related federal budget of the United States ($80 billion) goes to maintaining legacy systems.](https://www.spiria.com/en/blog/method-and-best-practices/cost-legacy-systems/)\nWhen large companies can only devote 20 percent of their budget to software modernization, things move even more slowly.\nObsolete systems create a vicious cycle where enterprises feel they have to choose between innovation or keeping things running.\n\nInstead of focusing on a full rip-and-replace of legacy systems, an application modernization strategy that identifies specific challenges reduces potential disruptions.\nMaking goals and achieving them one step at a time can make a big impact.\n\n## How to modernize applications\n\nThese examples of legacy 
application modernization show how four teams identified challenges, set manageable goals, and decided to [#JustCommit](https://twitter.com/search?q=just+commit) to development efficiency.\n\n### 1. Leveraging microservices\n\nWith a monolithic architecture, everything is developed, deployed, and scaled together.\nWith microservices, each component is broken out and deployed individually as services and the services communicate with each other via API calls.\n[Leveraging microservices allows teams to deploy faster and achieve scale, all at a lower cost](/topics/microservices/).\nAsk Media Group recently participated in a webcast where they discussed their transition from monoliths to microservices leveraging containers, Kubernetes, and AWS.\n\n[Watch the webcast](/webcast/cloud-native-transformation/)\n{: .alert .alert-gitlab-purple}\n\n### 2. Improving automation\n\nEquinix, a leading global data center company with over 180+ colocation facilities across five continents, wanted a solution that would help developers code better and faster, to bring customers new features quickly.\nWhile their old system was fine in the beginning, they needed a more robust solution that could meet their enterprise control and scaling needs. See how Equinix increased the agility of their developers, without sacrificing quality, through automation.\n\n{: .alert .alert-gitlab-purple}\n\n### 3. Simplifying the toolchain\n\nGoldman Sachs, one of the largest financial institutions in the world with over $1.5 trillion in assets, had some challenges in their technology division.\nAs a critical center of the financial provider's business, speed is essential, but a complex toolchain with too many parts was slowing them down.\nIn order to have faster deployment cycles and increase concurrent development, they knew they needed to simplify their toolchain. 
One cohesive environment helped them improve visibility and efficiency.\n\n[Read the case study](/customers/goldman-sachs/)\n{: .alert .alert-gitlab-purple}\n\n### 4. Reducing lifecycles\n\nChris Hill, Head of Systems Engineering for Infotainment at Jaguar Land Rover, shared his team's journey from feedback loops of 4-6 weeks to _just 30 minutes_ at the DevOps Enterprise Summit London in 2018.\nWho says you need to be stuck with a traditional release cadence?\n\n[Watch the presentation](/blog/chris-hill-devops-enterprise-summit-talk/)\n{: .alert .alert-gitlab-purple}\n\nAre you ready to tackle application modernization? [Just commit.](/blog/application-modernization-best-practices/)\n",[109,563,9],{"slug":777,"featured":6,"template":734},"application-modernization-examples","content:en-us:blog:application-modernization-examples.yml","Application Modernization Examples","en-us/blog/application-modernization-examples.yml","en-us/blog/application-modernization-examples",{"_path":783,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":784,"content":788,"config":797,"_id":799,"_type":14,"title":800,"_source":16,"_file":801,"_stem":802,"_extension":19},"/en-us/blog/atlassian-ending-data-center-as-gitlab-maintains-deployment-choice",{"config":785,"title":786,"description":787},{"noIndex":6},"Atlassian ending Data Center as GitLab maintains deployment choice","As Atlassian transitions Data Center customers to cloud-only, GitLab presents a menu of deployment choices that map to business needs.",{"title":786,"description":787,"authors":789,"heroImage":791,"date":792,"body":793,"category":576,"tags":794},[790],"Emilio Salvador","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098354/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%281%29_5XrohmuWBNuqL89BxVUzWm_1750098354056.png","2025-10-07","Change is never easy, especially when it's not your choice. 
Atlassian's announcement that [all Data Center products will reach end-of-life by March 28, 2029](https://www.atlassian.com/blog/announcements/atlassian-ascend), means thousands of organizations must now reconsider their DevSecOps deployment and infrastructure. But you don't have to settle for deployment options that don't fit your needs. GitLab maintains your freedom to choose — whether you need self-managed for compliance, cloud for convenience, or hybrid for flexibility — all within a single AI-powered DevSecOps platform that respects your requirements.\n\nWhile other vendors force migrations to cloud-only architectures, GitLab remains committed to supporting the deployment choices that match your business needs. Whether you're managing sensitive government data, operating in air-gapped environments, or simply prefer the control of self-managed deployments, we understand that one size doesn't fit all.\n\n## The cloud isn't the answer for everyone\n\nFor the many companies that invested millions of dollars in Data Center deployments, including those that migrated to Data Center [after its Server products were discontinued](https://about.gitlab.com/blog/atlassian-server-ending-move-to-a-single-devsecops-platform/), this announcement represents more than a product sunset. 
It signals a fundamental shift away from customer-centric architecture choices, forcing enterprises into difficult positions: accept a deployment model that doesn't fit their needs, or find a vendor that respects their requirements.\n\nMany of the organizations requiring self-managed deployments represent some of the world's most important organizations: healthcare systems protecting patient data, financial institutions managing trillions in assets, government agencies safeguarding national security, and defense contractors operating in air-gapped environments.\n\nThese organizations don't choose self-managed deployments for convenience; they choose them for compliance, security, and sovereignty requirements that cloud-only architectures simply cannot meet. Organizations operating in closed environments with restricted or no internet access aren't exceptions — they represent a significant portion of enterprise customers across various industries.\n\n![GitLab vs. Atlassian comparison table](https://res.cloudinary.com/about-gitlab-com/image/upload/v1759928476/ynl7wwmkh5xyqhszv46m.jpg)\n\n## The real cost of forced cloud migration goes beyond dollars\n\nWhile cloud-only vendors frame mandatory migrations as \"upgrades,\" organizations face substantial challenges beyond simple financial costs:\n\n* **Lost integration capabilities:** Years of custom integrations with legacy systems, carefully crafted workflows, and enterprise-specific automations become obsolete. Organizations with deep integrations to legacy systems often find cloud migration technically infeasible.\n\n* **Regulatory constraints:** For organizations in regulated industries, cloud migration isn't just complex — it's often not permitted. Data residency requirements, air-gapped environments, and strict regulatory frameworks don't bend to vendor preferences. 
The absence of single-tenant solutions in many cloud-only approaches creates insurmountable compliance barriers.\n\n* **Productivity impacts:** Cloud-only architectures often require juggling multiple products: separate tools for planning, code management, CI/CD, and documentation. Each tool means another context switch, another integration to maintain, another potential point of failure. GitLab research shows [30% of developers spend at least 50% of their job maintaining and/or integrating their DevSecOps toolchain](https://about.gitlab.com/developer-survey/). Fragmented architectures exacerbate this challenge rather than solving it.\n\n## GitLab offers choice, commitment, and consolidation\n\nEnterprise customers deserve a trustworthy technology partner. That's why we've committed to supporting a range of deployment options — whether you need on-premises for compliance, hybrid for flexibility, or cloud for convenience, the choice remains yours. That commitment continues with [GitLab Duo](https://about.gitlab.com/gitlab-duo/), our AI solution that supports developers at every stage of their workflow.\n\nBut we offer more than just deployment flexibility. While other vendors might force you to cobble together their products into a fragmented toolchain, GitLab provides everything in a **comprehensive AI-native DevSecOps platform**. Source code management, CI/CD, security scanning, Agile planning, and documentation are all managed within a single application and a single vendor relationship.\n\nThis isn't theoretical. When [Airbus](https://about.gitlab.com/customers/airbus/) and [Iron Mountain](https://about.gitlab.com/customers/iron-mountain/) evaluated their existing fragmented toolchains, they consistently identified challenges: poor user experience, missing functionalities like built-in security scanning and review apps, and management complexity from plugin troubleshooting. 
**These aren't minor challenges; they're major blockers for modern software delivery.**\n\n## Your migration path: Simpler than you think\n\nWe've helped thousands of organizations migrate from other vendors, and we've built the tools and expertise to make your transition smooth:\n\n* **Automated migration tools:** Our [Bitbucket Server importer](https://docs.gitlab.com/user/project/import/bitbucket_server/) brings over repositories, pull requests, comments, and even Large File Storage (LFS) objects. For Jira, our [built-in importer](https://docs.gitlab.com/user/project/import/jira/) handles issues, descriptions, and labels, with professional services available for complex migrations.\n\n* **Proven at scale:** A 500 GiB repository with 13,000 pull requests, 10,000 branches, and 7,000 tags is likely to [take just 8 hours to migrate](https://docs.gitlab.com/user/project/import/bitbucket_server/) from Bitbucket to GitLab using parallel processing.\n\n* **Immediate ROI:** A [Forrester Consulting Total Economic Impact™ study commissioned by GitLab](https://about.gitlab.com/resources/study-forrester-tei-gitlab-ultimate/) found that investing in GitLab Ultimate confirms these benefits translate to real bottom-line impact, with a three-year 483% ROI, 5x time saved in security related activities, and 25% savings in software toolchain costs.\n\n## Start your journey to a unified DevSecOps platform\n\nForward-thinking organizations aren't waiting for vendor-mandated deadlines. They're evaluating alternatives now, while they have time to migrate thoughtfully to platforms that protect their investments and deliver on promises.\n\nOrganizations invest in self-managed deployments because they need control, compliance, and customization. 
When vendors deprecate these capabilities, they remove not just features but the fundamental ability to choose environments matching business requirements.\n\nModern DevSecOps platforms should offer complete functionality that respects deployment needs, consolidates toolchains, and accelerates software delivery, without forcing compromises on security or data sovereignty.\n\n[Talk to our sales team](https://about.gitlab.com/sales/) today about your migration options, or explore our [comprehensive migration resources](https://about.gitlab.com/move-to-gitlab-from-atlassian/) to see how thousands of organizations have already made the switch.\n\nYou also can [try GitLab Ultimate with GitLab Duo Enterprise](https://about.gitlab.com/free-trial/devsecops/) for free for 30 days to see what a unified DevSecOps platform can do for your organization.",[9,573,795,796],"product","features",{"featured":91,"template":734,"slug":798},"atlassian-ending-data-center-as-gitlab-maintains-deployment-choice","content:en-us:blog:atlassian-ending-data-center-as-gitlab-maintains-deployment-choice.yml","Atlassian Ending Data Center As Gitlab Maintains Deployment Choice","en-us/blog/atlassian-ending-data-center-as-gitlab-maintains-deployment-choice.yml","en-us/blog/atlassian-ending-data-center-as-gitlab-maintains-deployment-choice",{"_path":804,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":805,"content":811,"config":818,"_id":820,"_type":14,"title":821,"_source":16,"_file":822,"_stem":823,"_extension":19},"/en-us/blog/aws-gitlab-serverless-webcast",{"title":806,"description":807,"ogTitle":806,"ogDescription":807,"noIndex":6,"ogImage":808,"ogUrl":809,"ogSiteName":720,"ogType":721,"canonicalUrls":809,"schema":810},"How to deploy AWS Lambda applications with ease","Highlights from our serverless webcast with AWS exploring the Serverless Application 
Model.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666262/Blog/Hero%20Images/default-blog-image.png","https://about.gitlab.com/blog/aws-gitlab-serverless-webcast","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to deploy AWS Lambda applications with ease\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2020-04-29\",\n      }",{"title":806,"description":807,"authors":812,"heroImage":808,"date":813,"body":814,"category":815,"tags":816},[772],"2020-04-29","\n\nIn the [Cloud Native Computing Foundation (CNCF) 2019 survey](https://www.cncf.io/blog/2019-cncf-survey-results-are-here-deployments-are-growing-in-size-and-speed-as-cloud-native-adoption-becomes-mainstream/), 41% of respondents use serverless technology. Among those using serverless, 80% use a hosted platform vs. 20% who use installable software. Of the 80% using a hosted platform, the top tool is AWS Lambda (53%).\n\nAs organizations continue to explore the power and scalability of serverless computing, AWS Lambda remains a large part of the conversation. On April 9, AWS and GitLab hosted a serverless webcast to demonstrate how teams can use [GitLab CI/CD](/topics/ci-cd/) and the AWS Serverless Application Model (SAM) to build, test, and deploy Lambda applications. For the serverless webcast, we showed attendees how to:\n\n*   Use and install the AWS SAM CLI\n*   Create a SAM application including a Lambda function and API\n*   Build, test, and deploy the application using GitLab CI/CD\n\nWhether you’re an AWS customer, a serverless newbie, or wanting to explore new ways to utilize GitLab CI/CD, this webcast had something for everyone. 
We’ve compiled some highlights from the discussion and a link to the on-demand webcast.\n\n{::options parse_block_html=\"true\" /}\n\n\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>&nbsp;&nbsp;\nWatch the webcast with AWS and GitLab to learn all about serverless - [Tune in here](/webcast/aws-gitlab-serverless/)!\n&nbsp;&nbsp;\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>\n{: .alert .alert-webcast}\n\n## What is the Serverless Application Model (SAM)?\n\nTooling and workflows are the biggest roadblocks to adopting serverless. Organizations love the scalability and automation of serverless but don’t believe that they have the tools to implement it effectively. In this webcast, we showed how teams can seamlessly use SAM with GitLab CI/CD for their serverless application development.\n\n[AWS SAM](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/what-is-sam.html) is an open source framework for building serverless applications on AWS. It can be considered an extension to CloudFormation that makes it easier to define and deploy AWS resources – such as Lambda functions, API Gateway APIs and DynamoDB tables – commonly used in serverless applications.\n\nIn addition to its templating capabilities, SAM also includes a CLI for testing and deployment allows teams to define the resources they need as code. So that includes the serverless functions, but can also include any of the rest of the AWS suite of tools. SAM works by taking all of those things and creates a cloud formation stack from a SAM template. 
Next it automatically deploys those various functions and other AWS components and gets the IAM configured correctly between all of them so that an application can run not only Lambda functions, but also leverage the rest of the AWS stack to create an entire system and application.\n\n\n## Why is SAM a great tool for enterprise teams?\n\nSenior developer evangelist at GitLab, [Brendan O’Leary](/company/team/#brendan), is a Node.js developer at heart. \"For better or worse,\" he laughs. When putting together the presentation, he noted that SAM offered templates not only in Python but in Node.js as well. \"I think applies directly to the enterprise because those teams are going to be diverse. They're going to have different needs, they're going to choose the language that best fits those needs. It was great to have this starter template that I could start with Node.js 12X to really get started on coding in a comfortable environment for me.\"\n\nThe SAM templates can also be an asset for enterprise teams because they streamline a lot of the backend work. In the project presented during the webcast, we were able to start from a SAM template to orchestrate the IAM permissions we needed instead of coding all of the cloud formation ourselves. For a large or distributed team, this makes SAM a great out-of-the-box tool for serverless applications.\n\n\n## The benefits of going serverless\n\nRam Dileepan, solutions architect at AWS, highlighted this quote from AWS CTO Werner Vogels: No server is easier to manage than no server at all. \"The main goal of modern application development is to automate and abstract as much as possible from the customer. 
So what we do as an AWS cloud, we abstract a lot of the details from developers so they can actually focus on building applications instead of working with infrastructure.\"\n\nFor teams looking to incorporate serverless, it can provide a number of benefits:\n\n*   Scalable\n*   Pay for what you use\n*   Availability\n\n\n## Serverless and microservices best practices\n\nWhile serverless means that, from the developer perspective, servers are not actively managed, there is still work to do. When you design the application, you have to design how are you going to monitor it and what you are going to monitor. \"Even when you go to serverless, you can actually just follow the standard development best practices,\" says Ram. Here he presented his three serverless/microservices best practices:\n\n*   Treat your infrastructure the way you treat your code\n*   Set up an automated integration and deployment pipeline\n*   Build with monitoring and observability from day one\n\nIn addition to going over the SAM CLI and creating a GitLab CI/CD pipeline, Brendan O’Leary and Ram Dileepan also fielded a variety of questions in the live Q&A. 
To watch the full webcast and learn more about serverless with GitLab and AWS, click the link below or in the header.\n\n[Watch our serverless webcast Ram Dileepan of AWS and Brendan O'Leary of GitLab 🍿](/webcast/aws-gitlab-serverless/)\n{: .alert .alert-gitlab-purple}\n","insights",[817,9,109],"webcast",{"slug":819,"featured":6,"template":734},"aws-gitlab-serverless-webcast","content:en-us:blog:aws-gitlab-serverless-webcast.yml","Aws Gitlab Serverless Webcast","en-us/blog/aws-gitlab-serverless-webcast.yml","en-us/blog/aws-gitlab-serverless-webcast",{"_path":825,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":826,"content":832,"config":839,"_id":841,"_type":14,"title":842,"_source":16,"_file":843,"_stem":844,"_extension":19},"/en-us/blog/best-practices-leading-orgs-to-release-software-faster",{"title":827,"description":828,"ogTitle":827,"ogDescription":828,"noIndex":6,"ogImage":829,"ogUrl":830,"ogSiteName":720,"ogType":721,"canonicalUrls":830,"schema":831},"4 best practices leading orgs to release software faster","GitLab's 2023 Global DevSecOps Survey illuminates the strategies that organizations deploying more frequently have in common.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663908/Blog/Hero%20Images/2023-devsecops-report-blog-banner2.png","https://about.gitlab.com/blog/best-practices-leading-orgs-to-release-software-faster","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"4 best practices leading orgs to release software faster\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Kristina Weis\"}],\n        \"datePublished\": \"2023-06-08\",\n      }",{"title":827,"description":828,"authors":833,"heroImage":829,"date":835,"body":836,"category":815,"tags":837},[834],"Kristina Weis","2023-06-08","\nReleasing software faster is one of the biggest goals of many organizations — and for good reason. 
It helps them keep up with competitors, land and keep more customers, improve employee satisfaction, and much more. But maintaining that velocity requires investment in processes and technologies that help DevSecOps teams deliver, secure, and deploy software faster without compromising quality.\n\nIn our [2023 Global DevSecOps Survey](https://about.gitlab.com/developer-survey/) we asked more than 5,000 development, security, and operations professionals about everything from deployment frequency to the practices teams have adopted – all to learn what the most agile and efficient organizations have in common. One respondent, a director of IT security in the retail sector, summed up the challenge as follows: “Software customers are increasingly vocal and demanding, expecting faster releases and greater customizability. Developers will need to keep up with these demands while still maintaining stability and usability.”\n\nSo what’s helping organizations be more productive and efficient? Here are four of the best practices that, according to the survey, help organizations release software faster and deploy more frequently:\n\n## 1. Running applications in the cloud\nOne of the benefits people commonly attribute to deploying to the cloud is increased development speed. As it turns out, this year’s survey shows there’s some serious truth to that. Respondents with at least a quarter of their applications in the cloud were 2.2 times more likely to be releasing software faster than they were a year ago — and respondents with at least half of their applications in the cloud were 4.2 times more likely to deploy to production multiple times per day.\n\nSeveral respondents commented on the value of the cloud while also acknowledging the complexities cloud computing can bring to software development. 
An IT operations manager in the industrial manufacturing sector shared that “developing software that is designed for the cloud-native environment” is one of the top challenges facing software development this year. Likewise, an IT operations manager in the telecommunications sector said: “With the increase in the use of cloud computing and IoT devices, there is a greater need for secure coding practices to protect sensitive data from cyber attacks.” As organizations move to a cloud-first model for software development, they will need to adopt technologies that allow them to build natively in the cloud while keeping security top of mind throughout the development process.\n\n## 2. BizDevOps\nThough DevOps and DevSecOps mostly steal the show in terms of methodologies, some organizations go a step further and [practice BizDevOps](https://about.gitlab.com/blog/a-snapshot-of-modern-devops-practices-today/) — that is, incorporating business teams alongside development, security, and operations teams. An IT operations manager in the software sector emphasized the importance of collaboration with the business, sharing that “as software projects become larger and more complex, developers will need to work closely with other team members, including designers, testers, project managers, and business stakeholders.” This approach appears to be paying off for some: Respondents whose organizations practice BizDevOps were 1.4 times more likely to be releasing software faster than they were a year ago.\n\n## 3. CI/CD\nIt’s not surprising that automating the software development lifecycle with [CI/CD](https://docs.gitlab.com/ee/ci/) would help teams release software faster and more efficiently; however, it’s nice to see confirmation and put some numbers to the difference it can make. 
The survey shows that respondents [practicing CI/CD](https://about.gitlab.com/blog/how-to-keep-up-with-ci-cd-best-practices/) were twice as likely to deploy multiple times per day and 1.2 times more likely to release software faster than they did a year ago.\n\nDespite the value of CI/CD for driving efficiency, respondents also identified challenges. For instance, an IT operations associate in the aerospace/defense sector pointed to “management that doesn't understand CI/CD at all” as a blocker to more efficient software development. Meanwhile, a software development intern in the biotech sector shared that “tools to automate CI/CD, together with code editors, APM software, and defect trackers, can help with a faster and quality development cycle,” but “companies are hesitant to spend on tools that can help increase their developers’ productivity.” These responses underscore the value of investing in tools that unify CI/CD with other DevSecOps practices — such as incorporating security early in the development process and creating tighter feedback loops — to help organizations break down development silos.\n\n## 4. DORA and other metrics\nOrganizations that [make a conscious effort to track key development metrics](https://about.gitlab.com/blog/how-zoopla-uses-dora-metrics-and-your-team-can-too/) are more likely to improve them, according to the survey. This makes sense because by virtue of an organization choosing to track a metric, they’re signaling to their teams that it’s important, likely reminding them of whether the metric is improving (or not) periodically, and quite possibly prioritizing initiatives aimed at improving those metrics. 
We found that respondents whose organizations track their [DORA metrics](https://docs.gitlab.com/ee/user/analytics/dora_metrics.html) and other similar metrics were 1.4 times more likely to deploy multiple times per day.\n\n## A deeper dive on productivity and efficiency\n\nFor a deeper look into release velocity and deployment frequency, and all the practices that made respondents more likely to release software faster and deploy multiple times per day, check out our [2023 DevSecOps Report: Productivity & Efficiency Within Reach](https://about.gitlab.com/developer-survey/).\n\nThe report also digs into two other key factors that can have a big impact on productivity and efficiency: how long it takes to onboard new developers and how difficult or easy it is for organizations to attract, hire, and retain developers. We’ll show you where things stand and the practices that made respondents more likely to be successful.\n\n_[Read the highlights from “Security Without Sacrifices,” the first report in our 2023 Global DevSecOps Report series.](/blog/gitlab-survey-highlights-wins-challenges-as-orgs-adopt-devsecops/)_\n",[838,109,9,573],"developer survey",{"slug":840,"featured":6,"template":734},"best-practices-leading-orgs-to-release-software-faster","content:en-us:blog:best-practices-leading-orgs-to-release-software-faster.yml","Best Practices Leading Orgs To Release Software Faster","en-us/blog/best-practices-leading-orgs-to-release-software-faster.yml","en-us/blog/best-practices-leading-orgs-to-release-software-faster",{"_path":846,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":847,"content":853,"config":860,"_id":862,"_type":14,"title":863,"_source":16,"_file":864,"_stem":865,"_extension":19},"/en-us/blog/building-a-cicd-pipeline-in-20-mins",{"title":848,"description":849,"ogTitle":848,"ogDescription":849,"noIndex":6,"ogImage":850,"ogUrl":851,"ogSiteName":720,"ogType":721,"canonicalUrls":851,"schema":852},"How to build a CI/CD pipeline in 20 minutes or 
less","Deploying your pipeline to Kubernetes is just a 'git push' away using GitLab's Auto DevOps feature.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666903/Blog/Hero%20Images/pipeline.jpg","https://about.gitlab.com/blog/building-a-cicd-pipeline-in-20-mins","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to build a CI/CD pipeline in 20 minutes or less\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2019-09-26\"\n      }",{"title":848,"description":849,"authors":854,"heroImage":850,"date":856,"body":857,"category":752,"tags":858},[855],"Sara Kassabian","2019-09-26","\nIn software development, time really is money. GitLab users know that by using our [Auto DevOps functionality](https://docs.gitlab.com/ee/topics/autodevops/), you can move from code to production in just two simple steps.\n\n[Eddie Zaneski](https://gitlab.com/eddiezane) of Digital Ocean joined us in Brooklyn at [GitLab Commit, our inaugural user conference](/blog/wrapping-up-commit/). 
In an informative and light-hearted talk, Eddie demonstrated how to build and deploy a [CI/CD pipeline](/topics/ci-cd/) to a Kubernetes cluster from scratch or by using GitLab’s [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) tooling in less than 20 minutes.\n\nIn the demo, Eddie and his co-founder were really wingin’ it when building an app for the “startup” he used for this demo, the Screaming Chicken Club.\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n\u003Cblockquote class=\"twitter-tweet\">\u003Cp lang=\"en\" dir=\"ltr\">Massive shoutout to \u003Ca href=\"https://twitter.com/kamaln7?ref_src=twsrc%5Etfw\">@kamaln7\u003C/a> for building \u003Ca href=\"https://t.co/kke5hc2FC8\">https://t.co/kke5hc2FC8\u003C/a> and lending it to me for \u003Ca href=\"https://twitter.com/hashtag/GitLabCommit?src=hash&amp;ref_src=twsrc%5Etfw\">#GitLabCommit\u003C/a>\u003C/p>&mdash; Eddie Zaneski (@eddiezane) \u003Ca href=\"https://twitter.com/eddiezane/status/1174044146002288640?ref_src=twsrc%5Etfw\">September 17, 2019\u003C/a>\u003C/blockquote> \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n“I'm trying to raise money right now and VCs are caring about my tech,” said Eddie of his hypothetical start-up. 
“An easy way to score credit with VCs by having a super secure and well-thought-out DevOps pipeline, and that's where GitLab really comes into play here.”\n\n[Auto DevOps](/topics/devops/) is an out-of-the-box solution that helps move your code into production faster by automating the complex components of building a CI/CD pipeline, such as: “Building your application into a container; checking it for vulnerabilities; checking it for dependencies, checking it for licenses; deploying that to a Kubernetes cluster; setting up host names; DNS, TLS certs; automatically renewing them for you and doing performance testing.”\n\nSo where do you start?\n\n## Spin up your Kubernetes cluster\n\nGitLab has an airtight integration with Kubernetes that makes it possible to [deploy software from GitLab’s CI/CD pipeline to Kubernetes](/solutions/kubernetes/) by using Auto DevOps or by building the pipeline yourself. Either way, the first step will be to [configure a new Kubernetes cluster to deploy your application](https://docs.gitlab.com/ee/user/project/clusters/index.html).\n\nIt’s really as simple as toggling to the lefthand sidebar on GitLab and clicking Kubernetes > Operations > Add a Cluster. This process works for [GCP or GKE users](https://docs.gitlab.com/ee/user/project/clusters/index.html#add-new-gke-cluster), as well as those that are not on Google Cloud or are using an on-prem solution. In the demo, Eddie used Digital Ocean’s managed Kubernetes service to create the cluster, select the data center, and pick the size of the node. Eddie estimated this process would take anywhere from three to five minutes.\n\nThe next step is to integrate the Kubernetes cluster into the project, which requires a number of manual tasks, including grabbing the URL for the Kubernetes API server, creating a service account and binding it to the cluster admin, and grabbing the service token that’s generated. 
In the spirit of innovative shortcuts, Eddie created a [kubectl plugin](https://gitlab.com/eddiezane/kubectl-gitlab_bootstrap) that makes it even easier to add the Kubernetes cluster to the associated GitLab project.\n\n“This is actually going to automatically bootstrap a Kubernetes cluster into your GitLab project, create all the service accounts, make all the GitLab API requests, and take care of everything under the hood.” Thanks, Eddie!\n\nNext, just grab the GitLab project ID, and run:\n\n`kubectl gitlab-bootstrap gitlab-project-id`\n\nThe result is a URL. Follow the URL to see more about the Kubernetes cluster in your GitLab project.\n\n## GitLab-managed applications make your life easier\n\nOnce you’re there, you’ll see a list of [GitLab-managed applications](https://docs.gitlab.com/ee/topics/autodevops/cloud_deployments/auto_devops_with_gke.html). These apps can be installed in just one click to help manage your new Kubernetes cluster.\n\n1. [Helm](https://docs.gitlab.com/ee/update/removals.html): Install Helm first, because it is the package manager for Kubernetes and is required to install the other applications.\n2. [Ingress](https://docs.gitlab.com/ee/update/removals.html): Once Helm is installed, you can install the [Ingress controller](https://docs.gitlab.com/ee/update/removals.html), which will handle all the routing and mapping within the cluster and will create a load balancer behind the scenes. **Copy the IP address that’s displayed; you’ll need it later.**\n3. [Prometheus](https://docs.gitlab.com/ee/update/removals.html): An open source tool that monitors your deployed applications.\n4. [Cert-Manager](https://docs.gitlab.com/ee/update/removals.html): This will handle all the certificates and make sure everything is up to date.\n5. 
[GitLab Runner](https://docs.gitlab.com/ee/update/removals.html): Lets you run your GitLab CI/CD on your own host, or within the Kubernetes cluster.\n\nThe superstar of the bunch is GitLab Runner, the open source project that is used to run your CI/CD jobs and send the results back to GitLab.\n\n## Launch Auto DevOps with the click of a button\n\nOnce you’ve created your Kubernetes cluster and installed the required applications, [launch the Auto DevOps process with the click of a button](https://docs.gitlab.com/ee/topics/autodevops/cloud_deployments/auto_devops_with_gke.html), literally.\n\n![Enable Auto DevOps](https://about.gitlab.com/images/blogimages/guide_enable_autodevops.jpg){: .shadow.medium.center}\n\nBy enabling Auto DevOps and selecting your deployment strategy (here is where you need the Ingress IP address), you kick off the CI/CD pipeline.\n\n## Or launch your own Auto DevOps process\n\nDon’t want to use our out-of-the-box Auto DevOps feature? You don’t have to. The good news is the underlying source code is available to you for each component of the deployment process, making it easy for you to parse out what jobs you'd like to run.\n\n“The great thing about GitLab being open source is nothing is magic, right? All this stuff is source code that we can all go look up and read,” says Eddie.\n\nThe source code for the entire out-of-the-box Auto DevOps process lives in [one YAML file](https://gitlab.com/gitlab-org/gitlab-foss/blob/master/lib/gitlab/ci/templates/Auto-DevOps.gitlab-ci.yml) in the GitLab repository. GitLab users are able to separately run jobs for each stage in the Auto DevOps process, from build to cleanup, simply by copy/pasting the [underlying source code](/solutions/source-code-management/) into a properly configured terminal.\n\nThe individual templates and components for the important jobs in each Auto DevOps stage are included in the YAML file. You can select which components you’d like to use. 
Note that nothing needs to be imported, because it all comes with your GitLab install.\n\nIn the demo, Eddie ran the jobs for the build and deploy stages as examples.\n\nRemember to return to the load balancer and grab the IP address Ingress created to configure your DNS, `git push`, then, voilà! Your CI/CD pipeline is running.\n\n## A peek inside the pipeline\n\nDuring the demo Eddie went behind the scenes to explain what was happening inside the pipelines for the build and deploy jobs he started.\n\n### Build\n\n“It's going to take care of a lot of stuff behind the hood for us,” said Eddie. The pipeline uses Docker to build containers inside Docker, which will log in to our Kubernetes cluster’s container registry.\n\n“So GitLab automatically provides you with a container registry for your project,” said Eddie. “It's going to substitute in a whole bunch of environment variables and handles all the logins and generates the token, and all that. So we don't actually have to think about anything.”\n\nNext, the Docker base image loads. Eddie went into more detail about how to write up the Docker set-up, but the GitLab build component can automatically figure out the type of project you’re running and generates a Docker file with best practices to build the container.\n\n“So my project is building, compiling, pushing up my layers to the container registry, and then my build job should finish real quick and then my deploy job is going to kick off,” explained Eddie.\n\n### Deploy\n\nThe deploy job kicks off by spinning up a Helm chart that automatically fills the required information, such as the container ID, the host name, namespace, etc., into the template. Then it will create the Ingress ID, and then deploy the application.\n\n## Put your CI/CD pipelines on autopilot with GitLab and Kubernetes\n\nIn just a few minutes, Eddie was able to demonstrate two different ways to build a CI/CD pipeline by using GitLab and Kubernetes. 
While our Auto DevOps feature makes it so you don’t have to create a bunch of YAMLs from scratch (because, let’s face it, if you’re running Kubernetes you’re already running a ton of YAMLs), our open source Auto DevOps process makes it possible to pick and choose which components or jobs you’d like to run.\n\nWatch the entire video from GitLab Commit Brooklyn to see Eddie run a **third** CI/CD pipeline during his 17-minute talk.\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/-shvwiBwFVI\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\nLike what you see? [Join us in London](/events/) on October 9 for our second GitLab Commit event with all new talks!\n",[859,9],"kubernetes",{"slug":861,"featured":6,"template":734},"building-a-cicd-pipeline-in-20-mins","content:en-us:blog:building-a-cicd-pipeline-in-20-mins.yml","Building A Cicd Pipeline In 20 Mins","en-us/blog/building-a-cicd-pipeline-in-20-mins.yml","en-us/blog/building-a-cicd-pipeline-in-20-mins",{"_path":867,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":868,"content":874,"config":879,"_id":881,"_type":14,"title":882,"_source":16,"_file":883,"_stem":884,"_extension":19},"/en-us/blog/ci-cd-the-ticket-to-multicloud",{"title":869,"description":870,"ogTitle":869,"ogDescription":870,"noIndex":6,"ogImage":871,"ogUrl":872,"ogSiteName":720,"ogType":721,"canonicalUrls":872,"schema":873},"CI/CD: The ticket to multicloud","Read our expert panel from MulticloudCon on how CI/CD and cloud-agnostic DevOps help organizations go multicloud and increase productivity.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679235/Blog/Hero%20Images/cloud-native-predictions-2019.jpg","https://about.gitlab.com/blog/ci-cd-the-ticket-to-multicloud","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"CI/CD: The ticket to multicloud\",\n        \"author\": 
[{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2020-01-17\"\n      }",{"title":869,"description":870,"authors":875,"heroImage":871,"date":876,"body":877,"category":815,"tags":878},[772],"2020-01-17","\n\nIn November 2019, we had the opportunity to co-host [MulticloudCon](https://multicloudcon.io/), a zero-day event with our partners at [Upbound](https://upbound.io/). The event featured experts in cloud, Kubernetes, database resources, CI/CD, security, and more, to learn how [multicloud is evolving](/topics/multicloud/) and empowering developers and operations experts across the industry.\n\nDevOps can play a major role in cloud usage. In this discussion from MulticloudCon, we assembled a panel of experts across the industry to talk about [CI/CD](/solutions/continuous-integration/) and DevOps in multiple clouds. As [multicloud](/topics/multicloud/) technology continues to evolve, tools can give organizations more control and flexibility on where their workloads live and where they deploy.\n\n![CI/CD MulticloudCon panelists](https://about.gitlab.com/images/blogimages/multicloudcon-panel.png){: .shadow.medium.center}\n\n## Panel highlights\n\n### Why multicloud is important:\n\n> “If we have a single point of failure on a cloud, it is really easy to have some downtime [or] an outage and be like, \"Well, it was my cloud provider's fault.\" But, to our customers, that doesn't matter. You as a company, we're down and that affects them.”\n– Ana Medina, Chaos Engineer at [Gremlin](https://www.gremlin.com/)\n\n> “There are a lot more applications now that are becoming event-driven and are relying on integrations with cloud providers. And if it's more than one, you can't just test on one provider and go well it works across the board. 
You need to be expanding your test coverage to cover multiple cloud providers.”\n– Denver Williams, DevOps/SRE Consultant at [Vulk Coop](http://vulk.coop/)\n\n\n### The challenges of multicloud:\n\n> “When you're running in multiple clouds, that also introduces problems… I'm talking more specifically about high availability and also fault tolerance and then disaster recovery. These are things people just think about, ‘Oh we need to connect, integrate.’ But at the end of the day, if you're serious about running these applications, you need to also think about those things. And introducing those complexities from the different cloud providers will definitely impact your operations.”\n– Angel Rivera, Developer Advocate at [CircleCI](https://circleci.com/)\n\n\n### How tools impact a multicloud strategy:\n\n> “One thing that helps a lot when you're working on deploys for multicloud is to choose tooling that is going to support multiple clouds off the bat… One thing you really want to avoid, if possible, is ending up with different workflows for different cloud providers. Because then you're testing with different CI/CD pipelines. It's different code and it's inevitably going to behave differently. And then you're going to run into weird bugs.” – Denver Williams\n\n> “When I'm talking to users and GitLab customers that are doing multicloud, they're doing a lot of orchestration and abstraction, and they're having to write an abstraction layer in order to homogenize a logic. A lot of folks have talked about Crossplane today. When I see these types of capabilities and Crossplane in that community emerging, that's pretty exciting because that's what I see a lot of folks writing all the time. That can just be pulled out into a tool and offloaded so that you can focus on the business logic.” – [William Chia](/company/team/#williamchia), Sr. 
Product Marketing Manager at GitLab\n\nLearn more about GitLab’s Crossplane integration in our [12.5 release](/releases/2019/11/22/gitlab-12-5-released/#crossplane-support-in-gitlab-managed-apps).\n\n\n### CI/CD and multicloud best practices:\n\n> “There's always going to be platform-specific code. Just keep that separate and then your actual YAML logic, keep it agnostic.” – Uma Mukkara, Co-founder and COO at [MayaData](https://mayadata.io/)\n\n> “At Gremlin we help companies avoid downtime. So, we're starting to work with integrations with CI/CD platforms so folks actually start having a stage that they run chaos engineering experiments... You can actually build a lot more testing around past outages that your company has had or maybe some of the large outages that we've seen around in the industry. Building testing around those scenarios, [we’re] making sure the caching layers are able to handle when one of your services goes down... If you're caching layer limits out, the other services that are dependent on it are able to still continue providing a good user experience.” – Ana Medina\n\n> “I always encourage people who are writing pipelines in our platform to do some checks against APIs that they use so that they can just fail their builds right away, instead of wasting money and effort and going to build that. It's going to eventually fail.” – Angel Rivera\n\nMulticloud is made possible through cloud native applications built from containers using services from different cloud providers, and allows for multiple services to be managed in one architecture. 
CI/CD plays a big role in workflow portability, ensuring workflows stay consistent (no matter where projects are deployed).\n\nWatch the full panel discussion below.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/Sx02_fyaGgc\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nPhoto by [Marc Wieland](https://unsplash.com/photos/zrj-TPjcRLA?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/clouds?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n\n",[109,9,859],{"slug":880,"featured":6,"template":734},"ci-cd-the-ticket-to-multicloud","content:en-us:blog:ci-cd-the-ticket-to-multicloud.yml","Ci Cd The Ticket To Multicloud","en-us/blog/ci-cd-the-ticket-to-multicloud.yml","en-us/blog/ci-cd-the-ticket-to-multicloud",{"_path":886,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":887,"content":893,"config":898,"_id":900,"_type":14,"title":901,"_source":16,"_file":902,"_stem":903,"_extension":19},"/en-us/blog/cloud-adoption-roadmap",{"title":888,"description":889,"ogTitle":888,"ogDescription":889,"noIndex":6,"ogImage":890,"ogUrl":891,"ogSiteName":720,"ogType":721,"canonicalUrls":891,"schema":892},"Cloud strategy and adoption roadmap for businesses","Everything you need to know for transforming your business to the cloud and how to plan out the perfect strategy for it.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680891/Blog/Hero%20Images/cloud-adoption-roadmap.jpg","https://about.gitlab.com/blog/cloud-adoption-roadmap","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Cloud strategy and adoption roadmap for businesses\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": 
\"2019-12-05\"\n      }",{"title":888,"description":889,"authors":894,"heroImage":890,"date":895,"body":896,"category":815,"tags":897},[772],"2019-12-05","\n\nOrganizations continue to focus on scalability and growth, and cloud can be a valuable asset in those strategies. When it comes to cloud adoption, not every organization takes the same path – some already work in multiple clouds, some work in industries with strict compliance standards, and some may only be starting their cloud journey.\n\nIt’s estimated that investments in infrastructure to support cloud computing account for [more than a third of all IT spending](https://www.zdnet.com/article/top-cloud-providers-2019-aws-microsoft-azure-google-cloud-ibm-makes-hybrid-move-salesforce-dominates-saas/), and teams want to make sure they’re investing in the right things that benefit them for the long-term. In order to implement the best cloud strategies to meet your needs, a cloud adoption roadmap should have four key steps:\n\n*   Assessment\n*   Planning\n*   Implementation\n*   Optimization\n\n## What is cloud adoption?\n\nCloud adoption aims to mitigate risk, reduce costs, and gain organizational scalability of database capabilities. “The cloud” refers to software and services that live and operate online rather than in an on-premise network of servers or local computers, and it creates incomparable flexibility in daily operations. The cloud provides the necessary speed for an organization to launch new releases quickly, securely, and efficiently.\n\n## Assessing cloud-readiness\n\nCloud adoption does not guarantee scalability and growth on its own. In order for cloud implementation to be successful, organizations have to identify challenges and expertise gaps that affect their cloud-readiness. For example, a lift-and-shift to the cloud isn’t going to produce great results if existing business applications are monolithic and/or outdated. 
In this scenario, companies will need to commit to an [application modernization](/blog/application-modernization-best-practices/) strategy that makes a cloud investment worthwhile.\n\nFor large enterprises currently working in a traditional IT environment, there may be internal barriers such as lack of organizational buy-in, reluctance to invest the required resources in a multiyear effort, or even regulatory and compliance restraints. On average, [enterprise adoption remains low at around 20%](https://www.mckinsey.com/business-functions/mckinsey-digital/our-insights/cloud-adoption-to-accelerate-it-modernization), so it may be beneficial for these organizations to adopt cloud’s [agile](/solutions/agile-delivery/) and automated operating model within their traditional IT, at least in the short-term.\n\nAs part of the assessment stage of your cloud adoption roadmap, ask yourself the following questions:\n\n1. Do we have the internal expertise necessary for a cloud migration or will we need to implement an education or hiring plan?\n2. Do we have industry requirements or compliance regulations to consider?\n3. Will we need to tackle legacy application modernization as well?\n4. What strategies have worked for similar companies in similar industries?\n\n## Creating a plan\n\nAfter identifying challenges and opportunities to cloud adoption, the work on an implementation plan begins. Whether it’s a cloud transformation or just migrating from one cloud to another, it’s important to create actionable steps and have the right leadership in place to guide the process.\n\nKeeping your assessment in mind, your organization will need to decide which clouds and cloud models work best for your needs and business goals. You’ll need to evaluate public, private, or hybrid clouds, in addition to SaaS, IaaS and PaaS cloud models, to determine which combination fits within your limitations. 
Having leaders with expertise in these areas of cloud computing, rather than relying on information from the cloud service providers themselves, will ensure that decisions are unbiased with your unique needs in mind.\n\n![cloud models](https://about.gitlab.com/images/blogimages/cloud_models.png){: .shadow.medium.center}\n\nBut what if you want to use multiple cloud providers? This is where a [multicloud](/topics/multicloud/) approach can be beneficial.\n\n### What is multicloud?\n\nMulticloud describes [how enterprises use multiple cloud providers to meet different technical or business requirements](https://www.zdnet.com/article/multicloud-everything-you-need-to-know-about-the-biggest-trend-in-cloud-computing/). At its core, multicloud is made possible through cloud-native applications built from containers using services and allows for multiple services to be managed in one architecture. Research indicates [85% of enterprises currently operate in multiple clouds](https://www.ibm.com/blogs/cloud-computing/2018/10/19/survey-multicloud-management-tools/).\n\n\n\nDuring the planning phase of your cloud roadmap, consider the following:\n\n1. Do we have internal expertise to make sure we’re making the right decisions?\n2. Have we evaluated the different cloud models?\n3. Would a multicloud approach be a good fit?\n\n## Putting plans into action\n\nThe implementation phase usually requires multiple steps and thrives when teams are able to communicate and collaborate with each other. As plans change (and they inevitably will), high visibility ensures teams can adapt.\n\nIn our recent migration from Azure to GCP, we documented our progress publicly and leaned on three of our [core values](https://handbook.gitlab.com/handbook/values/): efficiency, iteration, and transparency. 
We believe in taking small steps and looking for the most [boring solutions](https://handbook.gitlab.com/handbook/values/#boring-solutions) because that allows us to get feedback quickly and reduce cycle times. Whether migrating to the cloud for the first time, or just moving from one cloud to another, things rarely ever go smoothly. By practicing iteration, we were able to course correct and come up with the right solutions quickly. Learn how we put our values into action and watch our presentation at Google Cloud Next ‘19.\n\n[GitLab’s journey from Azure to GCP](/blog/gitlab-journey-from-azure-to-gcp/)\n{: .alert .alert-gitlab-purple .text-center}\n\nWhen implementing cloud strategies, expect your approach to DevOps to change as well.\n\n[DevOps](/topics/devops/) is all about developers and operations working together and using the cloud as a common language, and [cloud native app development](/topics/cloud-native/) will require a shift to a DevOps operating structure. Once you’ve decided on the cloud service and deployment models in your adoption roadmap, you’ll also need to evaluate which DevOps tools support your cloud initiatives. [Developer tools have a high capacity for driving cloud usage](/blog/gitlab-ci-cd-is-for-multi-cloud/) because once you have your application code hosted, the natural next step is finding a place to deploy it. For example, if you decided during the planning phase to adopt multicloud, having cloud-agnostic tools will play a big role in the success of that strategy.\n\nDuring the implementation phase of your cloud roadmap, consider the following:\n\n1. Take small steps and practice iteration so you can course correct effectively.\n2. Make sure teams have visibility into the cloud process and can collaborate as things progress.\n3. Ensure your DevOps structure will be able to support your cloud and cloud native application development initiatives.\n4. 
Evaluate developer tools and consider if cloud-agnostic tools would allow more flexibility with multiple clouds.\n\n## Cloud optimization and beyond\n\nWhile there will inevitably be a point when cloud models and DevOps tools have been implemented, a cloud adoption roadmap is really a never-ending journey for continuous improvement. By the time a cloud adoption timeline has been completed, there will be new technologies and new paths for cloud optimization already on the horizon. A solution you implemented may need to be deprecated in favor of something that works a little better. A valuable part of iteration is making decisions and acting quickly, and that is a process that never ends.\n\nIn [Cloud Powers the New Platform Economy](https://www.forrester.com/report/Cloud+Powers+The+New+Platform+Economy/-/E-RES120506), Forrester explains that you must automate, integrate, and orchestrate all the moving parts of your cloud to keep up with the pace of innovation the cloud economy demands. As you continue to improve your cloud ecosystems, consider the following:\n\n1. Are we keeping up with the pace of innovation and how can we improve?\n2. Are we investing in next-generation skills and providing continuing education opportunities?\n3. Are we evaluating new technologies?\n4. 
Are we managing our cloud effectively?\n\n\nCover image by [Matt Howard](https://unsplash.com/@thematthoward?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/journey?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText).\n{: .note}\n",[563,9],{"slug":899,"featured":6,"template":734},"cloud-adoption-roadmap","content:en-us:blog:cloud-adoption-roadmap.yml","Cloud Adoption Roadmap","en-us/blog/cloud-adoption-roadmap.yml","en-us/blog/cloud-adoption-roadmap",{"_path":905,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":906,"content":912,"config":918,"_id":920,"_type":14,"title":921,"_source":16,"_file":922,"_stem":923,"_extension":19},"/en-us/blog/cloud-native-architectures-made-easy",{"title":907,"description":908,"ogTitle":907,"ogDescription":908,"noIndex":6,"ogImage":909,"ogUrl":910,"ogSiteName":720,"ogType":721,"canonicalUrls":910,"schema":911},"Simplifying and optimizing cloud native architectures","Learn what cloud native architectures are, how to optimize them using GitLab's cohesive approach and what features you can use to help be more efficient.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671263/Blog/Hero%20Images/cloudarchitecture.jpg","https://about.gitlab.com/blog/cloud-native-architectures-made-easy","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Simplifying and optimizing cloud native architectures\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Suri Patel\"}],\n        \"datePublished\": \"2019-11-13\"\n      }",{"title":907,"description":908,"authors":913,"heroImage":909,"date":915,"body":916,"category":300,"tags":917},[914],"Suri Patel","2019-11-13","\nMany teams embark on a journey to strengthen operations and development. 
Whether it’s battling monolithic applications by adopting containers and microservices or attempting to elevate a mature architecture by switching CI/CD tools, it is important to have a solution with robust cloud native support. When containers and cloud native workflows are easy to set and maintain, teams increase operational efficiency and can focus on delivering better products faster.\n\n## What goes into a cloud native architecture?\n\n[Cloud native applications](/topics/cloud-native/) are built using [microservices](/topics/microservices/) rather than a monolithic application structure. You can think of microservices as smaller pieces that unite to perform a specific action. Microservices can be scaled based on load, creating a more resilient environment. Container orchestration tools, like [Kubernetes](/solutions/kubernetes/), enable developers to manage the way an application’s containers function, including scaling and deployment.\n\nEmbracing cloud native architectures results in an increase in developer time, a decrease in the amount of money spent on monitoring and scaling application resources (through cloud orchestration and container schedulers), and faster shipping.\n\n## GitLab is designed for cloud native architectures\n\nGitLab’s [Kubernetes](/solutions/kubernetes/) integration, [built-in container registry](https://docs.gitlab.com/ee/user/packages/container_registry/index.html), and advanced [CI/CD features](/solutions/continuous-integration/) support microservices, such as multi-project pipelines, and monorepo projects. Furthermore, teams can keep the same workflow regardless of which cloud apps they are deploying to, so there’s no need to rework your entire process.\n\n## Why choose GitLab for your cloud native needs\n\nGitLab has a prominent place in the cloud native ecosystem and according to Forrester: [“GitLab’s simple and cohesive approach lands it squarely as a leader. 
GitLab's approach of having a single application to manage each phase of software development comes through in its developer experience”](/press/releases/2019-09-20-gitlab-named-cloud-native-continuous-integration-tools-leader/).\n\nGitLab doesn’t require manual and painstaking scripts. Our tool has native capabilities for Kubernetes integration and an out-of-the-box solution for advanced deployment flows for progressive delivery, like incremental rollout and canary deploys. GitLab also comes with [feature flagging as a built-in capability](/blog/feature-flags-continuous-delivery/), eliminating the need for a third-party solution.\n\nGitLab’s [multicloud](/topics/multicloud/) strategy with workflow portability increases operational efficiencies and makes it the easiest way to build cloud native applications.\n\nCover image by [Julian Santa Ana](https://unsplash.com/@jul_xander) on [Unsplash](https://unsplash.com/photos/FKqH1QhUqaw)\n{: .note}\n",[109,9],{"slug":919,"featured":6,"template":734},"cloud-native-architectures-made-easy","content:en-us:blog:cloud-native-architectures-made-easy.yml","Cloud Native Architectures Made Easy","en-us/blog/cloud-native-architectures-made-easy.yml","en-us/blog/cloud-native-architectures-made-easy",{"_path":925,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":926,"content":932,"config":938,"_id":940,"_type":14,"title":941,"_source":16,"_file":942,"_stem":943,"_extension":19},"/en-us/blog/cloud-native-storage-beginners",{"title":927,"description":928,"ogTitle":927,"ogDescription":928,"noIndex":6,"ogImage":929,"ogUrl":930,"ogSiteName":720,"ogType":721,"canonicalUrls":930,"schema":931},"A guide to cloud native storage for beginners","Choosing a cloud native development strategy is a smart step in DevOps, but storage can be a challenge. 
Here’s what you need to consider.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681560/Blog/Hero%20Images/cloudnative.jpg","https://about.gitlab.com/blog/cloud-native-storage-beginners","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A guide to cloud native storage for beginners\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2020-09-10\",\n      }",{"title":927,"description":928,"authors":933,"heroImage":929,"date":935,"body":936,"category":815,"tags":937},[934],"Valerie Silverthorne","2020-09-10","\n\n[DevOps](/topics/devops/) and cloud native go hand-in-hand but that doesn’t mean the journey is straightforward, particularly when it comes to storage. Here’s everything you need to know about cloud-native storage if you’re just getting started. \n\n## What is cloud-native software development?\n\nBoiled down, the term [cloud native](/topics/cloud-native/) simply means taking advantage of the power of the cloud and doing so from the beginning of the software development lifecycle. Flexibility, speed, and “always on” capabilities make the cloud an ideal place for [modern software development](https://www.infoworld.com/article/3281046/what-is-cloud-native-the-modern-way-to-develop-software.html).\n\nAlthough [containers aren’t limited to just the cloud](https://containerjournal.com/features/what-do-containers-have-to-do-with-being-cloud-native-anyway/), they are a key part of cloud native software development because they make it simple to move chunks of code from cloud to cloud using the same set of tools and processes. Containers can be created, moved or deleted with just the click of a mouse. [Kubernetes](/solutions/kubernetes/) is an increasingly popular open source tool for managing containers.\n\n## Why storage is the stumbling block\n\nSo far, so good, but what about storage? 
The features that make containers so ideal for cloud native (flexible, portable, disposable) are the same things that make them a storage nightmare. Developers finished with containers can just kill them – but for most apps to work, they need access to reliable storage that can’t be eliminated. \n\nAnd that’s the big hiccup when it comes to cloud native storage, says [Brendan O’Leary](/company/team/#brendan), senior developer evangelist at GitLab. “Almost every app in existence needs database storage,” Brendan explains. “But in a cloud native world things come and go but storage can’t do that. Storage has to stick around and solving for that is the hardest part of cloud native. That’s the thing we need to conquer next.”\n\nThe [Cloud Native Computing Foundation](https://www.cncf.io/) says the goal is to create [\"persistent information\"](https://www.cncf.io/blog/a-complete-storage-guide-for-your-kubernetes-storage-problems/) that exists no matter what’s going on around it. Ideally the CNCF recommends that information not be stored in what it calls \"volatile\" containers.\n\n## Solutions on the horizon\n\nThe good news is that a number of companies are trying to solve the tricky problem of cloud native storage. 
Here’s a quick look in no particular order (Cockroach and Rancher are GitLab partners):\n\n* [OpenEBS]( https://openebs.io) is a Kubernetes-based tool to create stateful applications using Container Attached Storage.\n* Also Kubernetes-based, [Rook](https://rook.io) offers self-managed, scaling, and healing storage services.\n* [Cockroach Labs](https://www.cockroachlabs.com/) uses Distributed SQL to make databases portable and scalable.\n* [Rancher Longhorn](https://longhorn.io) offers persistent storage for Kubernetes.\n\n## Final considerations\n\nA Gartner Group report, “Top Emerging Trends in Cloud-Native Infrastructure”, advises clients to “choose storage solutions aligned with container-native data service requirements and the standard storage interface, [Container Storage Interface (CSI)](https://www.architecting.it/blog/container-storage-interface/). CSI is an API that lets container orchestration platforms like Kubernetes seamlessly communicate with stored data via a plug-in. \n\nAnd finally, there’s no shame in choosing something straightforward, Brendan suggests, particularly if you’re just getting started in the Kubernetes world. “You can go with a cloud provider’s data storage options,” he says. “That’s still cloud native but it’s even simpler to just use the tools that exist. 
Don’t try to reinvent the wheel.”\n\nCover image by [Joshua Coleman](https://unsplash.com/@joshstyle) on [Unsplash](https://unsplash.com)\n{: .note}\n",[9,859,731],{"slug":939,"featured":6,"template":734},"cloud-native-storage-beginners","content:en-us:blog:cloud-native-storage-beginners.yml","Cloud Native Storage Beginners","en-us/blog/cloud-native-storage-beginners.yml","en-us/blog/cloud-native-storage-beginners",{"_path":945,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":946,"content":952,"config":959,"_id":961,"_type":14,"title":962,"_source":16,"_file":963,"_stem":964,"_extension":19},"/en-us/blog/cloudhealth-and-gitlab-reducing-overruns",{"title":947,"description":948,"ogTitle":947,"ogDescription":948,"noIndex":6,"ogImage":949,"ogUrl":950,"ogSiteName":720,"ogType":721,"canonicalUrls":950,"schema":951},"How to prevent deployments from overrunning your budget","Guest authors from VMware share how to include budget and resource checking into your continuous deployment with Cloudhealth and GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670389/Blog/Hero%20Images/gitlab-cloud-journey.png","https://about.gitlab.com/blog/cloudhealth-and-gitlab-reducing-overruns","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to prevent deployments from overrunning your budget\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tim Davis\"},{\"@type\":\"Person\",\"name\":\"Bahubali (Bill) Shetti\"}],\n        \"datePublished\": \"2019-08-26\",\n      }",{"title":947,"description":948,"authors":953,"heroImage":949,"date":956,"body":957,"category":815,"tags":958},[954,955],"Tim Davis","Bahubali (Bill) Shetti","2019-08-26","\n\nManaging deployments is a complex task and DevOps admins generally consider it a victory when a deployment is\nachieved and somewhat repeatable. 
Unfortunately this process doesn't give DevOps admins time to\nconsider the impact of the outcome on the larger operations pipeline. We know the importance of\n[Continuous Verification](https://thenewstack.io/continuous-verification-the-missing-link-to-fully-automate-your-pipeline/)\n– it's just one of several day-two operations and best practices that need to be brought into the\ncontinuous deployment (CD) process to achieve efficiencies. But we also need to look at the budget.\n\n## Adding budget and resource checking into your CD\n\nMost developers and DevOps admins don't consider the impact of their deployment on the budget. They\nalso don't generally check if sufficient resources in AWS exist prior to deployment because, after\nall, aren't there \"unlimited\" resources on AWS?\n\nAdding the proper budget and resource checks into the pipeline helps avoid:\n\n* Potential rollbacks and clean-up actions\n* Redeployment (\"lift and shift\") into other regions in AWS\n* Long analysis to pinpoint budget overruns\n\nNot having to deal with these tasks improves the DevOps admin's metrics, such as mean time to change (MTTC),\ndeployment time, etc., and subsequently efficiency goes up.\n\n## Understanding the policy\n\nPrior to implementing any of these checks, it’s important to understand the \"policy.\" While every\norganization is different, and the iterations of \"policy\" are endless, there are some basic checks\nthat should always be implemented:\n\n* Ensure the project-specific budget is not already overrun\n* Will this deployment exceed the project budget?\n* Is the project already over project-specific limits and restrictions? (i.e. 
cannot use RDS, or\ncan't have more than 10 EC2 instances in a deployment)\n* Will this deployment exceed the project-specific resource policy?\n\nWith these basic checks in place, at least some initial sanity is achieved during a pipeline execution.\nMore and more complex iterations can be added as more is learned about the project and processes are improved.\n\n## How do you do it?\n\nRegardless of the policy complexity, implementing these checks can be easily accomplished with\nstandard off-the-shelf tools like [CloudHealth by VMware](https://cloudhealthtech.com) and [GitLab](/).\n\n* CloudHealth by VMware allows you to define \"perspectives\" specific to your project, create governance\nrules, and access this information through an API for easy integration into any CI/CD tool.\n* GitLab allows you to easily add in scripts and/or pre-built code (containers) enabling\nany possible check against any potential external system.\n\nIn order to highlight how to implement this type of check into the CI/CD pipeline, we've\ndelivered an [example configuration](https://cloudjourney.io/articles/multicloudops/budget_check_cicd-td/)\nusing both CloudHealth and GitLab. We hope this provides a nice baseline to build from.\n\n![CD WITH A CH check from GitLab CI/CD pipelines](https://about.gitlab.com/images/blogimages/glcdpipeline.png){: .shadow.medium.center}\n\n## In summary\n\nAlthough we've provided a baseline that we hope can be used for more complex policy checks in CD,\nconvincing DevOps admins to implement this is another problem. 
Improving metrics should provide\nan incentive for DevOps admins but it is not sufficient for them to simply add budget and resource checks.\nWhile every enterprise has its own process and metrics, we recommend adding a budgetary efficiency\nmetric for DevOps admins.\n\nUsing the configuration above, it’s easy to add in CloudHealth to continuously check the project's\nbudget and utilization, and adding a DevOps budget metric will not only ensure that these checks\nare deployed but will also lead to more efficient deployments.\n\nIf you have any questions regarding this or any other issue, feel free to reach out\nto us [@cloudjourneyio](https://twitter.com/cloudjourneyio) on Twitter!\n\n### About the guest authors\n\n_Bahubali (Bill) Shetti is the director of public cloud solutions for VMware Cloud Services at VMware.\nHe leads a team of cloud architects that evangelize and develop solutions for improving public cloud\noperations (AWS/Azure/GCP). Bahubali was part of the initial team that developed and launched\nVMware Cloud Services. Previous to VMware, he was director of product management at VCE\n(now Dell) for Cloud Management Products. Between 2011-2014, Bahubali led operations at Cumulus\nNetworks, led AWS cloud operations at several startups, and headed an open source routing\nsoftware project. Between 2008-2010, Bahubali led the cloud investment practice at Storm Ventures.\nHe spent 9 years at Cisco in product management and business development. He holds an M.S. in\nInformation Networking from Carnegie Mellon and a B.S. in Electrical Engineering from Rutgers._\n\n_Tim Davis is a cloud advocate at VMware where he focuses on public cloud operations and cloud native\napplications. He provides consulting guidance to a wide range of customers on these topics and\nprovides a bridge between customers and product teams at VMware. He also works to evangelize\nnative cloud usage including AWS, Azure and GCP. 
Prior to his current role, he was a specialist systems\nengineer focused on VMware’s Networking and Security product line. Before VMware, Tim worked as a\nconsultant and VMware architect at Dell Services, which was one of the largest contracts held at\nthe time. His background is in operations/management and architecture. He holds numerous\nindustry certifications including from VMware and Amazon Web Services._\n",[109,9,563,232],{"slug":960,"featured":6,"template":734},"cloudhealth-and-gitlab-reducing-overruns","content:en-us:blog:cloudhealth-and-gitlab-reducing-overruns.yml","Cloudhealth And Gitlab Reducing Overruns","en-us/blog/cloudhealth-and-gitlab-reducing-overruns.yml","en-us/blog/cloudhealth-and-gitlab-reducing-overruns",{"_path":966,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":967,"content":973,"config":980,"_id":982,"_type":14,"title":983,"_source":16,"_file":984,"_stem":985,"_extension":19},"/en-us/blog/cncf-five-technologies-to-watch-in-2021",{"title":968,"description":969,"ogTitle":968,"ogDescription":969,"noIndex":6,"ogImage":970,"ogUrl":971,"ogSiteName":720,"ogType":721,"canonicalUrls":971,"schema":972},"CNCF's 5 technologies to watch in 2021","We predict how CNCF's five tech trends to watch will impact cloud native and the tech industry over the next year and beyond.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680997/Blog/Hero%20Images/clouds-cover.jpg","https://about.gitlab.com/blog/cncf-five-technologies-to-watch-in-2021","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"CNCF's 5 technologies to watch in 2021\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brendan O'Leary\"}],\n        \"datePublished\": \"2020-11-24\",\n      }",{"title":968,"description":969,"authors":974,"heroImage":970,"date":976,"body":977,"category":815,"tags":978},[975],"Brendan O'Leary","2020-11-24","\n\nLast week the Cloud Native Computing Foundation 
(CNCF) held [KubeCon + CloudNativeCon North America](https://events.linuxfoundation.org/kubecon-cloudnativecon-north-america/). Even with conferences shifting from in-person to virtual, KubeCon still draws huge crowds and the entire industry's attention. Besides being one of the largest tech conferences of the year, KubeCon continues to show the cutting edge of technology at the forefront of the industry.\n\nToward the conclusion of the conference, [Liz Rice](https://www.cncf.io/spotlights/cncf-community-leader-spotlight-liz-rice/) - chairperson of the CNCF's Technical Oversight Committee (TOC) and VP of Open Source Engineering at Aqua Security - got on the virtual stage to share where the CNCF is going in the coming year and to talk about predictions for the industry as a whole. These predictions covered a vast landscape of new and emerging technologies and ideas. Some of the ideas are entirely within the bounds of the cloud native community, like service mesh, while others, like WebAssembly and eBPF, have even broader impact inside and outside of cloud native technology.\n\nIn the six years since the initial release of Kubernetes, the cloud native landscape has seen a proliferation of technologies and projects related to Kubernetes and cloud native in general. Rice even talks about this in [her closing remarks](https://kccncna20.sched.com/event/eoIl/keynote-predictions-from-the-technical-oversight-committee-toc-liz-rice-cncf-toc-chair-vice-president-open-source-engineering-aqua-security), discussing the much loved and much talked about CNCF landscape. After adding many more graduated projects this year, one of the first predictions is that the coming year will see some current sandboxed projects at the CNCF fail. 
As Rice explains, this is a natural consequence of the CNCF pushing for innovation because not every innovative project will find a use case in the \"real world\" that justifies the effort of bringing it to market alongside juggernauts like Kubernetes, Envoy, and etcd.\n\n## CNCF's 2021 predictions\n\nOne of the most exciting segments was Rice's five predictions for the technology industry at large - inside and outside of cloud native technologies. These five technologies to watch (or six depending on how you count them) span several emerging technology platforms and speak to the great diversity of needs and projects in the open source community. The TOC's five technology trends to watch include:\n\n1. Chaos engineering\n2. Kubernetes for the edge\n3. Service mesh\n4. Web assembly and eBPF\n5. The developer and operator experience\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n\u003Cblockquote class=\"twitter-tweet\">\u003Cp lang=\"en\" dir=\"ltr\">Wdyt? What did we miss? \u003Ca href=\"https://t.co/ErA8jZ6lsS\">https://t.co/ErA8jZ6lsS\u003C/a>\u003C/p>&mdash; Liz Rice at KubeCon + CloudNativeCon 🇪🇺 (@lizrice) \u003Ca href=\"https://twitter.com/lizrice/status/1329867030284144640?ref_src=twsrc%5Etfw\">November 20, 2020\u003C/a>\u003C/blockquote> \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n## Chaos engineering\n\nThe systems and applications we build are getting more and more complex and the human ability to accurately reason about how each component will interact and react becomes harder or impossible. [Chaos engineering](https://en.wikipedia.org/wiki/Chaos_engineering), first proposed and famously [practiced by Netflix's engineering team](https://netflixtechblog.com/tagged/chaos-engineering), takes that change to heart and accepts that complex enough systems are genuinely unpredictable. 
Once you've understood this aspect of complex systems, the best way to test and reason about their reliability is to perform experiments that best represent real-life, unpredictable events.\n\nWhile the concept of \"turn off a component and see how the system as a whole reacts\" makes sense on the surface, implementing such a methodology, especially in a large enterprise organization, can be daunting. Many projects and more than a few companies have been created to deal with this problem. It will be interesting to see if chaos engineering can move from the \"elite\" technology performers into a more mainstream engineering organization of every size and maturity level.\n\nAt GitLab, we have many customers already experimenting with or practicing chaos engineering. [Uma Mukkara](https://in.linkedin.com/in/uma-mukkara) and [Karthik Satchitanand](https://in.linkedin.com/in/karthik-satchitanand) from Maya Data presented on Chaos Engineering using GitLab templates and LitmusChaos at GitLab Commit in Brooklyn in 2019. We're also considering the many ways that chaos engineering could be more [deeply integrated](https://gitlab.com/groups/gitlab-org/-/epics/381) into GitLab as part of a single [DevOps](/topics/devops/) platform. Watch the video from Uma and Karthik's GitLab Commit Brooklyn presentation below.\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/ezhSg-t-PPM\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n## Kubernetes for the edge\n\nEdge computing refers to an area of cloud computing where the infrastructure for computing, storage, and other requirements need to be placed in the field closer to users or their use cases. 
While cloud computing helps to centralize and create large data centers that benefit from scale, many if not most interactions with users occur far away from the data center and instead move to the edge.\n\nAs Kubernetes matures and transforms compute in the data center, more use cases for the core tenets of Kubernetes will emerge. And as those use cases expand in scope, we will continue to see new distributions or plugins to the Kubernetes ecosystem to support new use cases. Projects like [KubeEdge](https://kubeedge.io/en/), [K3s](https://k3s.io/), and others, bring the Kubernetes API and extensibility to more devices, even those on the edge.\n\nWith the onslaught of data, devices, and demand for performance, edge computing has become an essential component of many organizations' overall network topology. Bringing the flexibility and power of Kubernetes compute and processing options to this problem will continue to expand in the coming year. For example, there may even be a Kubernetes cluster running [in your car](https://www.youtube.com/watch?v=zmuOxFp3CAk&feature=emb_title) today.\n\n## Service mesh\n\nRice predicts service mesh will be a hot topic in 2021, and with good reason. There has been an explosion of service mesh projects, discussions, and drama throughout the cloud native community in the past year. There has been an enormous proliferation of service mesh projects and teams discussing how a service mesh can benefit their deployments in 2020.\n\nSimilar to chaos engineering, service mesh attempts to organize the growing complexity of systems into a clear and reasonable package. As teams move to a [microservices approach](/topics/microservices/) for application delivery, understanding the interaction and links between existing and new services becomes critical. Service mesh projects like [Istio](https://istio.io/), [Linkerd](https://linkerd.io/), and [Consul](https://www.consul.io/) have cropped up in the past few years. 
These tools help discover both known and new services and their connections. The goal of the projects is to create signal from noise, allowing humans to understand how those services interact and depend on one another.\n\nIn 2020, there was a lot of drama and discussion around the overall benefits and drawbacks of service mesh and the specific projects used to implement it. Now that there is a greater understanding among CNCF stakeholders about service mesh, we can expect the cloud native community to settle into a clear set of recommendations about when it is appropriate to implement a service mesh and how to make the right decisions about service mesh for your organization.\n\nThe most significant trend here will be with the ability of service mesh to not only discover services but secure them through policy enforcement. Additionally, the desire for observability will drive service meshes to become a critical cornerstone of observability in microservices environments.\n\n## Web assembly and eBPF\n\nIn this prediction, Rice rightly points out that the technologies of web assembly and eBPF are not - on the surface - related. [Web assembly](https://webassembly.org/), also called Wasm, is a new type of virtual machine brought to the browser. [eBPF](https://ebpf.io/) is a programmable interface for interacting with the Linux kernel. So why did the TOC and Rice decide to include these two different technologies in one prediction?\n\nWell, they share a common goal of sandboxing code when it runs. Sandboxing code, which means segmenting it from the parts of memory and the computer it doesn't need to get its job done, is a critical step toward allowing for secure code execution even of unknown sources. In the case of web assembly, that code is running in your browser. For eBPF, it could be running on a shared cloud-based Linux host. In both cases, these tools enable providers and security teams to effectively protect their code and data from prying eyes. 
This will remain a key objective for engineering teams for years to come, because we need to segment code better from a security perspective.\n\n### Securing code by segmenting processes\n\nMany of the most massive zero-day attacks we've seen in the past few years demonstrate that some traditional pieces of the stack that we \"take for granted\" should instead be prioritized. Today, the barriers of the application memory or even CPU space are still ripe for attack. So inventing new and more secure ways of segmenting processes from one another will be a trend to watch for in 2021 and beyond.\n\nAt GitLab we see security and protection as belonging to the same DevOps lifecycle as the rest of engineering. The [Secure](/stages-devops-lifecycle/secure/) and [Protect](/stages-devops-lifecycle/govern/) stages of the DevOps lifecycle will continue to impact the rest of the cycle and how engineering departments develop and release code faster and more securely. We will see continued consolidation throughout the industry to bring security and protection initiatives to the forefront of every developer's mind, enabling developers and security professionals alike to deploy with confidence.\n\n## The developer and operator experience\n\nSimilar to prioritizing function over UX, our own experience in developing, deploying, and maintaining our projects often takes a back seat to \"getting the job done.\" However, in much the same way, the developer experience and operator experience in their day-to-day tasks will be a key focus as technologies like Kubernetes enter a more mature phase.\n\nWe've already seen colossal consolidation and focus on the DevOps platform as a whole. It was just a year or two ago that we grudgingly accepted a disjointed set of poorly integrated tools, seeing it as unavoidable. 
Today, we see many DevOps companies and teams selling [enterprise tools](/enterprise/) that are focusing on improving the dev and ops experience by building more capability into our devices and bringing together a more [complete DevOps platform](/solutions/devops-platform/).\n\nThis is a mission that is obviously near and dear to our hearts at GitLab. Next year will bring a renewed focus on the dev and ops experience as more companies settle into the new normal of collaborating with teammates remotely, asynchronously, and automatically. This focus makes the DevOps platform we choose all the more critical to our engineering team's success, and as software defines the world we live in even more by the day, our organizations' overall success.\n\nDevelopers and operators will come to expect an integrated DevOps platform that allows for the dual goals of getting software built and shipped on day 0 and maintaining and operating that software on days 1, 2, and beyond.\n\n## What's next?\n\nA trend that is harder to quantify is the concept of [observability](/blog/software-developer-changing-role/) and growing trends toward more open communities. The concept of service mesh, Kubernetes at the edge, and the operator experience all play into observability, but I suspect we'll see more discussion of it in the coming year. Also the acceleration of [5G technology](/blog/how-tomorrows-tech-affects-sw-dev/) will impact all computing at the edge - Kubernetes or not. Beyond 2021, trends in [AI in software development](/blog/ai-in-software-development/) may accelerate changes to how we all interact. What trends do you think the CNCF missed in outlining things to watch in 2021? 
If you have a strong opinion, I'd love to hear about it on [Twitter](https://twitter.com/twitter).\n",[9,859,979],"security",{"slug":981,"featured":6,"template":734},"cncf-five-technologies-to-watch-in-2021","content:en-us:blog:cncf-five-technologies-to-watch-in-2021.yml","Cncf Five Technologies To Watch In 2021","en-us/blog/cncf-five-technologies-to-watch-in-2021.yml","en-us/blog/cncf-five-technologies-to-watch-in-2021",{"_path":987,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":988,"content":994,"config":1000,"_id":1002,"_type":14,"title":1003,"_source":16,"_file":1004,"_stem":1005,"_extension":19},"/en-us/blog/containers-kubernetes-basics",{"title":989,"description":990,"ogTitle":989,"ogDescription":990,"noIndex":6,"ogImage":991,"ogUrl":992,"ogSiteName":720,"ogType":721,"canonicalUrls":992,"schema":993},"Kubernetes & containers, and where cloud native fits in – the basics","Brush up on your understanding of these concepts key to modern development.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671296/Blog/Hero%20Images/containers-kubernetes-basics.jpg","https://about.gitlab.com/blog/containers-kubernetes-basics","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Kubernetes & containers, and where cloud native fits in – the basics\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rebecca Dodd\"}],\n        \"datePublished\": \"2017-11-30\",\n      }",{"title":989,"description":990,"authors":995,"heroImage":991,"date":997,"body":998,"category":752,"tags":999},[996],"Rebecca Dodd","2017-11-30","\n\nWe throw around terms like Kubernetes, containers, and cloud native with some abandon, but sometimes take it for granted that everyone knows what's what. 
So here we go...\n\n\u003C!-- more -->\n\n## Container explainer\n\nA container is a method of operating system-based virtualization that allows\nyou to securely run an application and its dependencies independently without\nimpacting other containers or the operating system.\n\nBefore containers, it was common to use virtual machines (VMs) to provide a safe, sandbox environment in which to test software, within a computer. A container works much like a virtual machine except that, instead of packaging\nyour code with an operating system, it is run as a Linux process\ninside of the kernel. This means that each container only contains the code and dependencies needed to run that specific application, making them smaller and faster to run.\n\n![Containers vs virtual machines vs bare metal](https://about.gitlab.com/images/blogimages/containers-vm-bare-metal.png){: .medium.center}\n\n*\u003Csmall>Containers retain the same repeatability factor as virtual machines, but are much faster and use fewer resources to run.\u003C/small>*\n\n## Kuber... what?\n\nKubernetes is primarily a container scheduler – an open source platform designed to automate your management of application containers, from deploying and scaling to operating.\n\nWhile virtualization technology statically partitions your servers into smaller VMs, Kubernetes allows you to partition as you go, depending on how much or little resources are needed at the time, scaling up and down as necessary. You can respond quickly and efficiently to customer demand while limiting hardware usage and minimizing disruption to feature rollouts. 
With container schedulers, the focus shifts from the machine to the service – the machine becomes an ephemeral, disposable element.\n\nWhat's more, using containers in this way means they are decoupled from the host filesystem and underlying infrastructure, making them portable across clouds and operating systems.\n\n## Containers + Kubernetes \u003Ci class=\"fas fa-arrow-right\" aria-hidden=\"true\">\u003C/i> cloud native\n\nWhich brings us to [cloud native development](/topics/cloud-native/). Cloud native applications embrace a new approach to building and running applications that takes full advantage of the cloud computing model and container schedulers such as Kubernetes.\n\nNot to be confused with running traditional applications in the cloud, cloud native means that applications are purpose-built for the cloud, and consist of loosely coupled services. Applications are re-architected for running in the cloud – shifting the focus away from the machine to the service instead. Cloud native acknowledges that the cloud is about more than just who manages your servers – it is the next step in digital transformation.\n\nBy building applications that can run on any cloud, right out of the box, you’re free to migrate and distribute across vendors in line with your budget and business priorities. You also free up developer time – they don’t have to write code to run and scale across a range of cloud infrastructures, so they can focus on improvements and new features.\n\nSound good? We think so! 
Visit [about.gitlab.com/kubernetes](/solutions/kubernetes/) to learn more about how GitLab and Kubernetes can get you to cloud native nirvana.\n\n[Cover image](https://unsplash.com/@guibolduc?photo=uBe2mknURG4) by [Guillaume Bolduc](https://unsplash.com/@guibolduc) on Unsplash\n{: .note}\n",[859,9],{"slug":1001,"featured":6,"template":734},"containers-kubernetes-basics","content:en-us:blog:containers-kubernetes-basics.yml","Containers Kubernetes Basics","en-us/blog/containers-kubernetes-basics.yml","en-us/blog/containers-kubernetes-basics",{"_path":1007,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1008,"content":1014,"config":1022,"_id":1024,"_type":14,"title":1025,"_source":16,"_file":1026,"_stem":1027,"_extension":19},"/en-us/blog/custom-actions-rasa-gitlab-devops",{"title":1009,"description":1010,"ogTitle":1009,"ogDescription":1010,"noIndex":6,"ogImage":1011,"ogUrl":1012,"ogSiteName":720,"ogType":721,"canonicalUrls":1012,"schema":1013},"Creating custom action containers for Rasa X with GitLab","Using the GitLab DevOps Platform together with Rasa X can make it easier for stakeholders to deliver a virtual assistant by automating potentially time-consuming, error-prone steps. 
In this case, we’ve shown how you can build Rasa custom action servers and deploy them to Kubernetes.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668410/Blog/Hero%20Images/vablog.jpg","https://about.gitlab.com/blog/custom-actions-rasa-gitlab-devops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Create and Deploy Custom Actions Containers to Rasa X using Gitlab DevOps Platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"William Arias\"}],\n        \"datePublished\": \"2021-04-06\",\n      }",{"title":1015,"description":1010,"authors":1016,"heroImage":1011,"date":1018,"body":1019,"category":576,"tags":1020},"Create and Deploy Custom Actions Containers to Rasa X using Gitlab DevOps Platform",[1017],"William Arias","2021-04-06","**This blog post was a collaboration between William Arias, from Gitlab, and\nVincent D. Warmerdam, from Rasa. You can find the same blog post on [Rasa's\nblog](https://blog.rasa.com/create-and-deploy-custom-actions-containers-to-rasa-x-using-gitlab-devops-platform/)**.  \n\n\n## Create and Deploy Custom Actions Containers to Rasa X using Gitlab DevOps\nPlatform\n\nVirtual assistants do more than just carry on conversations. They can send\nemails, make updates to a calendar, or call an API endpoint. Essentially,\nthey can do actions that add significant value and convenience to the user\nexperience.\n\nIn assistants built with Rasa*, this type of functionality is executed by\ncustom code called custom actions. As with any code you run in production,\nyou’ll need to think about how you want to deploy updates to custom actions.\nIn this blog post, we’ll show you how to set up GitLab to deploy custom\naction Docker containers to your Kubernetes cluster. 
If we follow [good\nDevOps practices](/stages-devops-lifecycle/) we can greatly speed up the\ndevelopment and quality of our  virtual assistants.\n\n* Rasa Open Source is a machine learning framework for building text and\nvoice-based virtual assistants. It provides infrastructure for understanding\nmessages, holding conversations, and connecting to many messaging channels\nand APIs. Rasa X is a toolset that runs on top of Rasa Open Source,\nextending its capabilities. Rasa X includes key features for sharing the\nassistant with test users, reviewing and annotating conversation data, and\ndeploying the assistant. [Learn more about Rasa.](https://rasa.com/docs/)\n\n\n## Deployment high-level overview\n\nThe typical workflow for deploying a new version of custom actions is\noutlined below.  \n\n![actions-process](https://about.gitlab.com/images/blogimages/actions-process.png){:\n.shadow}\n\n\nEvery change to your custom actions code will require a new container image\nto be built and pulled by Rasa X. Gitlab CI/CD can save you from doing a lot\nof manual work and automate steps like the ones described in the workflow\nabove. Let's see how to do it.  \n\n\n## Using Rasa with Gitlab DevOps Platform\n\nLet's create a pipeline that will automate manual steps.\n\n\n---\n\n**NOTE**\n\nThis article assumes you have your [Gitlab\nProject](https://gitlab.com/warias/gl-commit-2020) with your customs Actions\nCode created along with a [Google Kubernetes\nCluster](https://cloud.google.com/kubernetes-engine/docs/quickstart).\n\n\n---\n\n\nIf you are a Gitlab user you are probably familiar with .gitlab-ci.yml file\nand its CI/CD capabilities. Every time you commit a change to your customs\nactions code you want Gitlab to run a script that will build and update your\ndocker containers. 
\n\n![actions-process-2](https://about.gitlab.com/images/blogimages/process2.png){:\n.shadow}\n\n\nLet's breakdown the CI/CD pipeline by describing the gitlab-ci.yml file so\nyou can use it and customize it to your needs\n\n## Variables\n\nWe make use of environment variables created in Gitlab at the moment of\nrunning the Jobs to define our actions Docker image  \n\n\n```\n\nvariables:\n    ACTIONS_CONTAINER_IMAGE: $CI_REGISTRY_IMAGE/$CI_COMMIT_REF_SLUG\n    TAG: $CI_COMMIT_SHA\n    K8S_SECRET: secret-gitlab-registry\n\n```\n\n\nThe snippet above does the following:\n\n- It defines the name of the Docker Image for custom actions using\nenvironment variables ```$CI_REGISTRY_IMAGE/$CI_COMMIT_REF_SLUG.``` This\nwill make the name of the Docker image different for every commit\n\n- It creates a secret used to pull the Rasa Action Image from the Gitlab\nPrivate Registry to the Google Kubernetes Cluster. \n\n\n## Stages\n\nWe have two main stages in our pipeline, build and deploy:\n\n```\n\nstages:\n  - build\n  - deploy \n```\n\nEvery time there is a new commit with changes to our custom actions code, or\nwhen we decide to run the CI/CD Pipeline it will:\n\n- Build: Here, we automate the building of the Docker image using the\nvariables defined above, and the Dockerfile. We also tag the image and push\nit to the GitLab container registry.\n\n- Deploy: Here we log-in to Kubernetes Engine on Google Cloud and deploy the\nnewly created Actions image to Rasa X.\n\nLet's see it in more detail:  \n\n\n**Build**:\n\n```\n\nbuild-actions-image:\n image: docker:19.03.1\n services:\n   - docker:dind\n stage: build\n script:\n   - docker login -u ```$CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY```\n   - docker build -t $ACTIONS_CONTAINER_IMAGE:$TAG -f Dockerfile .\n   - docker push $ACTIONS_CONTAINER_IMAGE:$TAG\n```\n\nThe job build-actions-image executed on the build stage takes advantage of\nthe CI/CD variables that are part of the environment where the pipelines\nrun. 
It automates the usage of Docker commands to build the Actions image by\nreading its corresponding Dockerfile. The output of this stage is a new\nCustom Actions image per every commit with code changes.  \n\n\n**Deploy**:\n\n```\n\ndeploy-custom-action-x:\n  stage: deploy\n  image: crileroro/gcloud-kubectl-helm\n  variables:\n    GCP_PROJECT: gke-project-302411\n    GCP_REGION: europe-west1\n    CLUSTER_NAME: gke-python-demo\n    NAMESPACE_RASA: rasa-environment \n  before_script:\n    - gcloud auth activate-service-account --key-file $SERVICE_ACCOUNT_GCP\n    - gcloud config set project $GCP_PROJECT\n    - gcloud config set compute/region $GCP_REGION\n    - gcloud container clusters get-credentials $CLUSTER_NAME\n  script:\n    - kubectl create ns $NAMESPACE_RASA --dry-run=client -o yaml | kubectl apply -f -\n    - kubectl create secret docker-registry $K8S_SECRET\n              --docker-server=$CI_REGISTRY\n              --docker-username=$CI_DEPLOY_USER\n              --docker-password=$CI_DEPLOY_PASSWORD\n              --namespace $NAMESPACE_RASA\n              -o yaml --dry-run=client | kubectl apply -f -\n    - helm repo add rasa-x https://rasahq.github.io/rasa-x-helm\n    - helm upgrade -i --reuse-values \n                      --namespace $NAMESPACE_RASA\n                      --set app.name=$ACTIONS_CONTAINER_IMAGE\n                      --set app.tag=$TAG \n                      --set images.imagePullSecrets[0].name=$K8S_SECRET rasa-x rasa-x/rasa-x\n```\n\n\nNotice the variables in ```before_script```, these ones are needed to\nauthenticate to GCP where we have our Kubernetes cluster. This step is\noptional and could be skipped in cases where you have [Gitlab\npre-integrated](https://docs.gitlab.com/ee/user/project/clusters/add_remove_clusters.html)\nwith your Kubernetes cluster running on Google Cloud.  
\n\n\nThe main and most interesting part of the script is:  \n\n```\n\nscript:\n    - kubectl create ns $NAMESPACE_RASA --dry-run=client -o yaml | kubectl apply -f -\n    - kubectl create secret docker-registry $K8S_SECRET\n              --docker-server=$CI_REGISTRY\n              --docker-username=$CI_DEPLOY_USER\n              --docker-password=$CI_DEPLOY_PASSWORD\n              --namespace $NAMESPACE_RASA\n              -o yaml --dry-run=client | kubectl apply -f -\n    - helm repo add rasa-x https://rasahq.github.io/rasa-x-helm\n    - helm upgrade -i --reuse-values \n                      --namespace $NAMESPACE_RASA\n                      --set app.name=$ACTIONS_CONTAINER_IMAGE\n                      --set app.tag=$TAG \n                      --set images.imagePullSecrets[0].name=$K8S_SECRET rasa-x rasa-x/rasa-x\n\n```\n\n\nWe start by creating the *namespace* for our custom actions code, and if it\nalready exists, then we proceed to apply Kubernetes commands using kubectl\nand helm.  \n\n```\n\nhelm repo add rasa-x https://rasahq.github.io/rasa-x-helm\n    - helm upgrade -i --reuse-values \n                      --namespace $NAMESPACE_RASA\n                      --set app.name=$ACTIONS_CONTAINER_IMAGE\n                      --set app.tag=$TAG \n                      --set images.imagePullSecrets[0].name=$K8S_SECRET rasa-x rasa-x/rasa-x\n```\n\nThe snippet above adds a rasa-x Helm chart and upgrades or changes the\nvalues corresponding to the new **Custom Action Image** by assigning to it\nthe ```$ACTIONS_CONTAINER_IMAGE``` created in the build stage.\n\nNote that the pipeline described above focuses only on creating and\ndeploying the ACTIONS_CONTAINER_IMAGE. It could be extended by adding more\nstages, for example, code quality, security testing, and unit testing among\nothers.  
\n\n\n## Summary\n\nUsing the GitLab DevOps Platform together with Rasa X can make it easier for\nstakeholders to deliver a virtual assistant by automating potentially\ntime-consuming, error-prone steps. In this case, we’ve shown how you can\nbuild Rasa custom action servers and deploy them to Kubernetes.\n\nPushing new custom action containers to Kubernetes only scratches the\nsurface of what you can automate with GitLab. You could also add steps for\ncode quality, security audits and unit tests. The main goal is to automate\nthe manual parts of deployment so that you can focus on what is important.\nIn the case of Rasa X, that means that more time can be spent learning from\nyour users and making a better assistant in the process.\n\n\nDo you want to learn more? Watch this video of Gitlab DevOps Platform and\nRasa [Deploy your Rasa Chatbots like a boss with\nDevOps](https://youtu.be/ko9-zPDuhQo)\n\n\nHappy hacking!\n\n\nCover image by [Eric\nKrull](https://unsplash.com/@ekrull?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\non [Unsplash](https://unsplash.com)\n\n{: .note}\n",[1021,9,563],"CI",{"slug":1023,"featured":6,"template":734},"custom-actions-rasa-gitlab-devops","content:en-us:blog:custom-actions-rasa-gitlab-devops.yml","Custom Actions Rasa Gitlab Devops","en-us/blog/custom-actions-rasa-gitlab-devops.yml","en-us/blog/custom-actions-rasa-gitlab-devops",{"_path":1029,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1030,"content":1036,"config":1042,"_id":1044,"_type":14,"title":1045,"_source":16,"_file":1046,"_stem":1047,"_extension":19},"/en-us/blog/delta-cloud-native",{"title":1031,"description":1032,"ogTitle":1031,"ogDescription":1032,"noIndex":6,"ogImage":1033,"ogUrl":1034,"ogSiteName":720,"ogType":721,"canonicalUrls":1034,"schema":1035},"How Delta made the journey to cloud native","Delta tossed aside the rule book to go cloud native and achieve workflow portability. 
Here's how it's done.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678376/Blog/Hero%20Images/deltacommit.jpg","https://about.gitlab.com/blog/delta-cloud-native","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How Delta made the journey to cloud native\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2019-10-17\",\n      }",{"title":1031,"description":1032,"authors":1037,"heroImage":1033,"date":1038,"body":1039,"category":729,"tags":1040},[934],"2019-10-17","\n_Delta Air Lines is the top domestic carrier in the United States, flying over 200 million people a year to more than 300 destinations in 50 countries. Delta is in a highly competitive industry with a lot of moving parts and that’s why, in 2016, the company began a sweeping digital transformation journey. At [GitLab Commit in Brooklyn](/blog/wrapping-up-commit/), Jasmine James, IT manager, DevOps Center of Excellence at Delta, shared how the company journeyed to [cloud native](/topics/cloud-native/) while avoiding vendor lock-in._\n\nDelta’s primary goal was business agility, Jasmine says, and the plan was to get there using cloud native. “We'll do cloud native and then we'll get the business agility, we thought,” she says. “But at Delta, because we have such large, complex systems and a very mission-critical environment, it was not that easy at all.”\n\nTo start, Delta took a hard look at its existing environment and at ways it could be improved. Metrics-based process mapping made it clear the infrastructure was standing in the way of delivering value. A flexible architecture would also make it easier to have scalable and reliable workloads, she explains. 
The company’s existing tools wouldn’t work with cloud native, so Jasmine’s team set out to find tools that could provide version control, [continuous integration, and continuous delivery](/solutions/continuous-integration/) – the three areas the team considered the [MVP](https://www.techopedia.com/definition/27809/minimum-viable-product-mvp) to get the job done.\n\n## Stick with vowels\n\nThe team came up with an easy-to-remember acronym to describe the criteria used during the tool search: **AEIOU**. **A** is for applicability: Will the tool be applicable for the heavy Java and Linux users at Delta? **E** meant enterprise-ready because Delta needed tried and true maturity. **I** stands for integration, and Jasmine was quick to point out that in this case, it wasn’t about legacy integration but simply a matter of ensuring all the new tools worked well together. **O** is for overhead, which has particular meaning for Jasmine’s team since they manage all the development tools at Delta. “We had to ask ourselves how easy it would be to manage and administer tools for 5000 developers at Delta,” she says. And finally, **U** represents usefulness, which is another way of saying the team wanted to ensure it would choose the right building blocks that would work together.\n\nDelta’s first choice of tools was GitLab, followed by [Sonatype Nexus](https://www.sonatype.com/product-nexus-repository) and Jenkins for CI, Jasmine says. Today Delta is considering expanding its options for developers to also include [GitLab CI](/solutions/continuous-integration/).\n\n## Careful choices = concrete benefits\n\nThe careful thought process has already shown a number of concrete benefits, Jasmine says. Delta created an API to allow customers flying different legs using partner airlines to check in just one time. 
And the airline’s employees have enhanced decision support around weather events that help to minimize the impact of canceled flights.\n\nBut the benefits go further, Jasmine stresses. “We now have the ability to play the field,” she says. “We not only can leverage the best of breed features in the public cloud space, we also can pick and choose based on public cloud provider performance and cost. With the cost savings we have been able to do a lot (which means we can) fund more great features.”\n\nDelta’s also been able to offer what Jasmine calls a “first class developer experience” because programmers can leverage both the airline’s on premises [Open Shift](https://www.openshift.com) private cloud and scale to the public cloud as needed, all while using familiar programming languages and tools.\n\nJasmine’s take away: “Be you, be different, be great in cloud native. What that means is that although I’ve talked a lot about Delta’s journey, there is no one way to implement cloud native.”\n\nWatch all of Jasmine’s presentation:\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/zV_hFcxoN8I\" frameborder=\"0\" allow=\"accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\nCover image by [Angela Compagnone](https://unsplash.com/@angelacompagnone) on [Unsplash](https://unsplash.com/).\n{: .note}\n",[9,859,1041,9],"user stories",{"slug":1043,"featured":6,"template":734},"delta-cloud-native","content:en-us:blog:delta-cloud-native.yml","Delta Cloud 
Native","en-us/blog/delta-cloud-native.yml","en-us/blog/delta-cloud-native",{"_path":1049,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1050,"content":1056,"config":1063,"_id":1065,"_type":14,"title":1066,"_source":16,"_file":1067,"_stem":1068,"_extension":19},"/en-us/blog/deploy-aws",{"title":1051,"description":1052,"ogTitle":1051,"ogDescription":1052,"noIndex":6,"ogImage":1053,"ogUrl":1054,"ogSiteName":720,"ogType":721,"canonicalUrls":1054,"schema":1055},"How to deploy to AWS with GitLab","We believe deploying to the cloud should be easy and boring. The deployment process is the same regardless of what tech stack you're using so why not automate it?","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672124/Blog/Hero%20Images/aws_rocket.jpg","https://about.gitlab.com/blog/deploy-aws","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to deploy to AWS with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Orit Golowinski\"}],\n        \"datePublished\": \"2020-12-15\",\n      }",{"title":1051,"description":1052,"authors":1057,"heroImage":1053,"date":1059,"body":1060,"category":752,"tags":1061},[1058],"Orit Golowinski","2020-12-15","\nCloud computing services are replacing traditional hardware technologies at an extremely fast pace. The majority of businesses worldwide are already moving their applications to the cloud — both public and private cloud — or plan to in the near future. Over a short period of time, this technology took over the market as businesses preferred remote access to data as well as the cloud's scalability, economy, and reach.\n\n## AWS Deployment: deploying applications to the cloud\n\nCOVID-19 and the resulting trend toward remote work forced organizations to adopt cloud technologies even if they hadn’t planned to originally. Software deployment to the cloud has also increased. 
Cloud is no longer just virtual machines, organizations are driving the use of [Containers as a Service (CaaS)](https://searchitoperations.techtarget.com/definition/Containers-as-a-Service-CaaS) due to their growing interest in leveraging containers to ease development and testing, speed up deployment, scale operations, and increase the efficiency of workloads running in the cloud.\n\nSince deployment to the cloud has become a standard practice, at GitLab we want to make this repeatable and [boring](https://handbook.gitlab.com/handbook/values/#boring-solutions). In this blog post, we explain how we've made it easier to deploy to Amazon Web Services (AWS) as part of your deployment process. We invite users to replicate this example to deploy to other cloud providers in a similar way.\n\nSince we want cloud deployment to be as flexible as possible (similar to a microservices architecture), we constructed atomic Docker images that function as building blocks. Users can use these images as part of their custom `gitlab-ci.yml` file or use our predefined `.gitlab-ci.yml` templates. We also added the ability to use [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) with the new AWS deployment targets.\n\n## AWS Deployment: how to use GitLab's official AWS Docker Images\n\n### AWS CLI Docker image\nIn [GitLab 12.6](/releases/2019/12/22/gitlab-12-6-released/), we provided an official GitLab [AWS cloud-deploy](https://gitlab.com/gitlab-org/cloud-deploy/-/blob/master/aws/cloud_deploy/Dockerfile) Docker image that downloads and installs the [AWS CLI](https://aws.amazon.com/cli/). This allows users to run `aws` commands directly from their pipelines. 
For more information, see [Run AWS commands from GitLab CI/CD](https://docs.gitlab.com/ee/ci/cloud_deployment/#run-aws-commands-from-gitlab-cicd).\n\n### CloudFormation stack creation Docker image\nIn [GitLab 13.5](/releases/2020/10/22/gitlab-13-5-released/), we provided a Docker image that runs a script that [creates a stack with CloudFormation](https://gitlab.com/gitlab-org/cloud-deploy/-/blob/master/aws/src/bin/gl-cloudformation). The `gl-cloudprovision create-stack` uses [aws cloudformation create-stack](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html) behind the scenes. A JSON file based on the CloudFormation template must be passed to that command. For an example of this type of JSON file, see [`cf_create_stack.json`](https://gitlab.com/ebaque/jekyll-demo/-/blob/deploy-to-ec2/aws/cf_create_stack.json). With this type of JSON file, the command creates the infrastructure on AWS, including an EC2 instance directly from the `.gitlab-ci.yml` file. The script exists once we get confirmation that the stack setup is complete or has failed (through periodic polling).\n\n### Push to S3 and Deploy to EC2 Docker image\nIn [GitLab 13.5](/releases/2020/10/22/gitlab-13-5-released/) we also provided a Docker image with [Push to S3 and Deploy to EC2 scripts](https://gitlab.com/gitlab-org/cloud-deploy/-/blob/master/aws/src/bin/gl-ec2). The `gl-ec2 push-to-s3` script pushes source code to an S3 bucket. For an example of the JSON file to pass to the `aws deploy push` command, see [`s3_push.json`](https://gitlab.com/ebaque/jekyll-demo/-/blob/deploy-to-ec2/aws/s3_push.json). This code can be whatever artifact is built from a preceding build job. The `gl-ec2 deploy-to-ec2` script uses `aws deploy create-deployment` behind the scenes to create a deployment to an EC2 instance directly from the `.gitlab-ci.yml` file. 
For an example of the JSON template to pass, see [`create_deployment.json`](https://gitlab.com/ebaque/jekyll-demo/-/blob/deploy-to-ec2/aws/create_deployment.json). The script ends once we get confirmation that the deployment has succeeded or failed (via polling).\n\n## AWS Deployment: using GitLab CI templates to deploy to AWS\n\n### How to deploy to Elastic Container Service (ECS) with GitLab\nIn [GitLab 12.9](/releases/2020/03/22/gitlab-12-9-released/), we created a full `.gitlab-ci.yml` template called [`Deploy-ECS.giltab-ci.yml`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Jobs/Deploy/ECS.gitlab-ci.yml) that deploys to Amazon ECS and extends support for Fargate. Users can include the template in their configuration, specify a few variables, and their application will be deployed and ready to go in no time. This template can be customized for your specific needs. For example: Replacing the selected container registry, changing the path of the file location, etc.\n\n### How to deploy to Elastic Cloud Compute (EC2) with GitLab\nIn [GitLab 13.5](/releases/2020/10/22/gitlab-13-5-released/), we created a full `.gitlab-ci.yml` template called [`CF-Provision-and-Deploy-EC2.gitlab-ci.yml`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Jobs/Deploy/EC2.gitlab-ci.yml) that provisions the infrastructure by leveraging [AWS CloudFormation](https://aws.amazon.com/cloudformation/). It then pushes your previously-built artifact to an [AWS S3 bucket](https://aws.amazon.com/s3/) and deploys the pushed content to [AWS EC2](https://aws.amazon.com/ec2/).\n\n## AWS Deployment: security  considerations\n\n### Predefined AWS CI/CD variables\n\nIn order to deploy to AWS, you must use AWS security keys to connect to to your AWS instance. 
Users can define this security keys as [CI/CD environment](/topics/ci-cd/) variables that can be used by the deployment pipeline.\n\nIn [GitLab 12.9](/releases/2020/03/22/gitlab-12-9-released/), we added support for predefined AWS variables. This support function helps users know which variables are required for deploying to AWS and also prevents typos and spelling mistakes.\n\n| Env. variable name | Value|\n| --- | --- |\n| `AWS_ACCESS_KEY_ID` | Your Access key ID |\n| `AWS_SECRET_ACCESS_KEY` | Your Secret access key |\n| `AWS_DEFAULT_REGION` | Your region code |\n\n### \"Just-in-time\" guidance for AWS deployments\n\n[GitLab 13.1](/releases/2020/06/22/gitlab-13-1-released/) provides just-in-time guidance for users who wish to deploy to AWS. Setting up AWS deployments isn't always as easy as we'd like it to be, so we've added in-product links to our AWS templates and documentation when you start adding AWS CI/CD variables to make it easier for you to use our AWS features. This will help you get up and running faster.\n\n![In-product guidance for AWS](https://about.gitlab.com/images/blogimages/aws_guide.png)\n\nAWS guide from CI/CD variables\n\n### Added security for the GitLab's official AWS Docker images\n\nIn [GitLab 13.5](/releases/2020/10/22/gitlab-13-5-released/), we changed the image identifier from the release version number to the Docker image digest. Docker supports immutable image identifiers and we adopted this best practice to update our cloud-deploy images. When a new image is tagged, we also programmatically retrieve the image digest upon its build and create a release note to effectively communicate this digest to users. This guarantees that every instance of the service runs exactly the same code. You can roll back to an earlier version of the image, even if that version wasn't tagged (or is no longer tagged). 
This can even prevent race conditions if a new image is pushed while a deploy is in progress.\n\n![Docker Image Digest](https://about.gitlab.com/images/blogimages/digest1.png)\n\nDocker image digest or release tag\n\n## AWS Deployment: auto DevOps support\n\nGitLab already supports Kubernetes users deploying to AWS EKS cluster. Click the link to read instructions about [how to deploy an application to a GitLab-managed Amazon EKS cluster with Auto DevOps](/blog/deploying-application-eks/#:~:text=The%20Auto%20DevOps%20function%20at,build%2C%20and%20deploy%20your%20application).\n\nWe also expanded Auto DevOps to support non-Kubernetes users. Users can specify their deployment target by adding the `AUTO_DEVOPS_PLATFORM_TARGET` variable under the CI/CD variables settings. Specifying the deployment target platform builds a full CI/CD pipeline that deploys to AWS targets.\n\nWe currently support:\n\n- `AUTO_DEVOPS_PLATFORM_TARGET: ECS` (added in GitLab 13.0)\n- `AUTO_DEVOPS_PLATFORM_TARGET: FARGATE` (added in GitLab 13.2)\n- `AUTO_DEVOPS_PLATFORM_TARGET: EC2` (added in GitLab 13.6)\n\nFor more information about Auto DevOps for AWS targets, see [requirements for Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/requirements.html) documentation.\n\nHere's a quick recording for how to use Auto Deploy to Amazon ECS:\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/HzRhLLFlAos\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\nSpeed run on how to use auto deploy to EC2 (animation):\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/rVr-vZfNL6U\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n## AWS Deployment: Future plans to extend deployment support via GitLab\n\nCheck out some of the open issues below to see our plans are for the future of deploying to AWS using GitLab.\n\n- [Show AWS deployment 
success code in logs](https://gitlab.com/gitlab-org/gitlab/-/issues/215333): This will bring the success/failure codes from AWS into your GitLab pipeline logs, allowing you to see the deployment success code without needing to go into the AWS console to retrieve the logs.\n- [Show AWS deployment success code in pipeline view](https://gitlab.com/gitlab-org/gitlab/-/issues/232983): This will bring the success/failure codes from AWS into your GitLab pipeline, allowing you to see if the deployment job was successful in one view.\n- [Auto Deploy to AWS S3](https://gitlab.com/gitlab-org/gitlab/-/issues/219087): This will expand the supported deployment targets covered in this blog to include [S3 buckets](https://aws.amazon.com/s3/) as well.\n- [AWS integration per-environment role management](https://gitlab.com/gitlab-org/gitlab/-/issues/27107): This returns a set of temporary security credentials you can use to access AWS resources that you normally might not be able to access. This is accomplished by using the [AWS IAM](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) roles.\n\n## More material on deploying to EKS and Lambda\n\n- [Demo of how to deploy to EKS](https://docs.google.com/presentation/d/1iXnB6lvTx2_-_0ASElLUDZwyFPWILCRx54XjJkMFuw0/edit#slide=id.g6bb36a7017_2_42).\n- [Whitepaper on how to deploy on AWS from GitLab](/resources/whitepaper-deploy-aws-gitlab/).\n\nWe invite you to contribute to our other cloud provider solutions:\n\n- [Streamline GCP deployments](https://gitlab.com/groups/gitlab-org/-/epics/2706).\n- [Streamline Azure deployments](https://gitlab.com/groups/gitlab-org/-/epics/4846).\n\nAt GitLab, [everyone can contribute](https://handbook.gitlab.com/handbook/company/strategy/#contribute-with-gitlab). 
If you want to deploy to a target that isn't mentioned in this post, please let us know by adding an issue and linking it to our [Natively support hypercloud deployments](https://gitlab.com/groups/gitlab-org/-/epics/1804) epic.\n\nCover image by [SpaceX](https://unsplash.com/photos/uj3hvdfQujI) on [Unsplash](https://www.unsplash.com)\n",[9,563,1021,1062],"CD",{"slug":1064,"featured":6,"template":734},"deploy-aws","content:en-us:blog:deploy-aws.yml","Deploy Aws","en-us/blog/deploy-aws.yml","en-us/blog/deploy-aws",{"_path":1070,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1071,"content":1077,"config":1083,"_id":1085,"_type":14,"title":1086,"_source":16,"_file":1087,"_stem":1088,"_extension":19},"/en-us/blog/devops-on-the-edge-a-conversation-about-gitlab-and-arm",{"title":1072,"description":1073,"ogTitle":1072,"ogDescription":1073,"noIndex":6,"ogImage":1074,"ogUrl":1075,"ogSiteName":720,"ogType":721,"canonicalUrls":1075,"schema":1076},"DevOps on the edge: Upcoming collaborations between GitLab and Arm","Check out the latest news from the technical evangelist team about upcoming initiatives from GitLab and Arm.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682923/Blog/Hero%20Images/gitlab-arm-collaboration.jpg","https://about.gitlab.com/blog/devops-on-the-edge-a-conversation-about-gitlab-and-arm","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"DevOps on the edge: Upcoming collaborations between GitLab and Arm\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Priyanka Sharma\"}],\n        \"datePublished\": \"2019-10-08\",\n      }",{"title":1072,"description":1073,"authors":1078,"heroImage":1074,"date":1080,"body":1081,"category":729,"tags":1082},[1079],"Priyanka Sharma","2019-10-08","\nDevOps has moved from being a trend to an established cornerstone of the software development and delivery lifecycle. 
Today, the best practices of DevOps are being applied, in new and unique ways, to edge computing. As a board member of the Cloud Native Computing Foundation, I participate in open source communities regularly and over the years, I have collaborated with various folks from Arm because today where there is the edge, there is Arm.\n\nAs the technical evangelism leader at GitLab, I got involved with folks from the Arm project when collaborating on [CNCF.ci](http://cncf.ci). GitLab is a complete [DevOps platform](/solutions/devops-platform/), delivered as a single application. A key component of our product is our CI/CD pipeline that is well loved and used in the industry. Arm, through its market leadership in the mobile and embedded space, is now expanding into infrastructure space for edge-to-cloud applications. There is tremendous potential to grow within this emerging space and offer software developers a frictionless environment to develop innovative software at a rapid pace, securely.\nArm is having their annual conference [Arm TechCon 2019](https://www.armtechcon.com/) this week in San Jose, California, and I thought this is a great opportunity to highlight key projects and activities happening within the ecosystem involving Arm and GitLab:\n\n### GitLab for edge base research projects\n\nEric Van Hensbergen, R&D fellow from Arm's Research team, has been leading an effort to [use GitLab for edge base research projects](https://community.arm.com/developer/research/b/articles/posts/continuous-cross-architecture-integration-with-gitlab) creating multi-architecture images using Docker containers, including running GitLab’s 64-bit Runner on Arm instances on public cloud providers such as Packet Cloud and AWS. 
You can [access the runner](https://packages.gitlab.com/runner/gitlab-runner) for yourself too!\n\n### Stream processing on the edge\n\nLast month at [GitLab Commit Brooklyn](/blog/wrapping-up-commit/), GitLab’s first ever user conference, Eduardo Silva, principal engineer from Arm Treasure Data, [delivered a talk on the benefits of stream processing on the edge](https://gitlabcommit2019brooklyn.sched.com/event/TPDd/picking-up-speed-logging-stream-processing) in distributed systems using [Fluent Bit](https://fluentbit.io/) (a [Fluentd](https://www.fluentd.org/) open source sub-project).\n\n### Join the CNCF CI Working Group Monthly Meeting\n\nToday, all projects on [CNCF.CI](https://cncf.ci/) are being built and tested on both x86 and Arm architecture inside a Kubernetes test environment hosted on Packet’s bare metal infrastructure. For anyone interested, the working group hosts open meetings every month. More details are available in their [Monthly Meeting doc](https://docs.google.com/document/d/1NA4N6PvNEkHX1yzaDFr19Xlru-amRxNi2pliqudmYNA/edit). It’s a great group and I recommend people attend.\n\nThere are a lot of exciting activities happening in the edge-to-cloud and DevOps space. As a developer evangelist, I know the value Arm brings to the ecosystem and am excited to see the commencement of the GitLab and Arm partnership. More announcements to come in the near future. 
Stay tuned!",[109,9,232,268],{"slug":1084,"featured":6,"template":734},"devops-on-the-edge-a-conversation-about-gitlab-and-arm","content:en-us:blog:devops-on-the-edge-a-conversation-about-gitlab-and-arm.yml","Devops On The Edge A Conversation About Gitlab And Arm","en-us/blog/devops-on-the-edge-a-conversation-about-gitlab-and-arm.yml","en-us/blog/devops-on-the-edge-a-conversation-about-gitlab-and-arm",{"_path":1090,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1091,"content":1097,"config":1104,"_id":1106,"_type":14,"title":1107,"_source":16,"_file":1108,"_stem":1109,"_extension":19},"/en-us/blog/docker-hub-rate-limit-monitoring",{"title":1092,"description":1093,"ogTitle":1092,"ogDescription":1093,"noIndex":6,"ogImage":1094,"ogUrl":1095,"ogSiteName":720,"ogType":721,"canonicalUrls":1095,"schema":1096},"How to make Docker Hub rate limit monitoring a breeze","Docker Hub Rate Limits are enforced and we need to find ways to monitor the remaining pull requests. Explore some ways to create a monitoring plugin for Nagios/Icinga/Sensu/Zabbix and test-drive a new Prometheus exporter in combination with Grafana.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681749/Blog/Hero%20Images/vidarnm-unsplash.jpg","https://about.gitlab.com/blog/docker-hub-rate-limit-monitoring","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to make Docker Hub rate limit monitoring a breeze\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2020-11-18\",\n      }",{"title":1092,"description":1093,"authors":1098,"heroImage":1094,"date":1100,"body":1101,"category":752,"tags":1102},[1099],"Michael Friedrich","2020-11-18","\n\nWhen we learned about the [Docker Hub Rate Limit](/blog/mitigating-the-impact-of-docker-hub-pull-requests-limits/), we thought about ways to mitigate and analyse the new situation. 
Container images are widely used and adopted for sandbox environments in [CI/CD pipelines](/solutions/continuous-integration/) and cloud-native production environments with app deployment in [Kubernetes clusters](/solutions/kubernetes/).\n\n## What is meant by Docker Hub limits?\n\nEach `docker pull` request toward the central `hub.docker.com` container registry is being counted. When a defined limit is reached, future requests are blocked and might be delayed into the next free window. [CI/CD](/topics/ci-cd/) jobs cannot be executed anymore after receiving a HTTP error `429 - too many requests` and similar errors will be seen in production deployment logs for Kubernetes.\n\nDocker defines this limit with 100 anonymous requests every six hours for the client's source IP address. If you have multiple container deployments behind an IP address, for example a company DMZ using a NAT, this limit can be reached very fast. A similar problem happens with watchtower tools which try to keep your container images updated, for example on your self-managed GitLab Runner. The limit can be raised by logging in, and by getting a paid subscription.\n\nThe question is: Where can you see the current limit and the remaining pull requests?\n\n### How to check the Docker Hub request limit?\n\nThe [Docker documentation](https://docs.docker.com/docker-hub/download-rate-limit/#how-can-i-check-my-current-rate) suggests to use CLI commands which invoke `curl` HTTP requests against the Docker Hub registry and parse the JSON response with [jq](https://stedolan.github.io/jq/).\n\nDefine the `IMAGE` variable once for the following CLI commands to use:\n\n```shell\n$ IMAGE=\"ratelimitpreview/test\"\n```\n\nObtain a token for authorization. 
Optionally print the variable value to verify its content.\n\n```shell\n$ TOKEN=$(curl \"https://auth.docker.io/token?service=registry.docker.io&scope=repository:$IMAGE:pull\" | jq -r .token)\n\n$ echo $TOKEN\n```\n\nThe next step is to simulate a `docker pull` request. Instead of using `GET` as HTTP request method, a `HEAD` request is sent which does not count toward the rate limit. The response headers contain the keys `RateLimit-Limit` and `RateLimit-Remaining`.\n\n```shell\n$ curl --head -H \"Authorization: Bearer $TOKEN\" https://registry-1.docker.io/v2/$IMAGE/manifests/latest\n```\n\nThe limit in the example is `2500` with remaining `2495` pull requests. `21600` defines the limit time window as six hours.\n\n```\nRateLimit-Limit: 2500;w=21600\nRateLimit-Remaining: 2495;w=21600\n```\n\n`RateLimit-Reset` can be returned too, this will be the remaining time until the limits are reset.\n\n### Create a monitoring script\n\nThe CLI commands can be turned into a programming language of your choice which provides methods for HTTP requests and better response parsing. The algorithm needs to follow these steps:\n\n* Obtain an authorization token from Docker Hub. Username/password credentials can be optionally provided, otherwise the request happens anonymously.\n* Send a `HEAD` request to the Docker Hub registry and simulate a `docker pull` request\n* Parse the response headers and extract the values for `RateLimit-Limit` and `RateLimit-Remaining`\n* Print a summary of the received values\n\nA plugin script which can be used by Nagios/Icinga/Sensu/Zabbix and others has additional requirements. It needs to implement the [Monitoring Plugins API specification](https://www.monitoring-plugins.org/doc/guidelines.html):\n\n* Print the limit and remaining count\n* Calculate a state: Ok, Warning, Critical, Unknown and print a helpful text on the shell\n* Add optional warning/critical thresholds for the remaining count. 
Whenever the count is lower than the threshold, the state changes to Warning/Critical and the exit code changes: `OK=0, Warning=1, Critical=2, Unknown=3`\n* Collect limit values as performance metrics for graphing and visualization\n* Add verbose mode and timeout parameters as plugin development best practices. If Docker Hub does not respond within 10 seconds as default, the plugin exits and returns `Unknown` as state.\n\nYou can download the [check_docker_hub_limit.py plugin script](https://gitlab.com/gitlab-com/marketing/corporate_marketing/developer-evangelism/code/check-docker-hub-limit) and integrate it into your monitoring environment.\n\n#### Use the monitoring plugin script\n\nThe [check_docker_hub_limit.py plugin script](https://gitlab.com/gitlab-com/marketing/corporate_marketing/developer-evangelism/code/check-docker-hub-limit) plugin is written in Python 3 and requires the `requests` library. Follow the [installation instructions](https://gitlab.com/gitlab-com/marketing/corporate_marketing/developer-evangelism/code/check-docker-hub-limit#installation) and run the plugin script with the `--help` parameter to see all available options:\n\n```\n$ python check_docker_hub_limit.py --help\n\nusage: check_docker_hub_limit.py [-h] [-w WARNING] [-c CRITICAL] [-v] [-t TIMEOUT]\n\nVersion: 2.0.0\n\noptional arguments:\n  -h, --help            show this help message and exit\n  -w WARNING, --warning WARNING\n                        warning threshold for remaining\n  -c CRITICAL, --critical CRITICAL\n                        critical threshold for remaining\n  -v, --verbose         increase output verbosity\n  -t TIMEOUT, --timeout TIMEOUT\n                        Timeout in seconds (default 10s)\n```\n\nRun the script to fetch the current remaining count. 
The plugin script exit code returns `0` being OK.\n\n```\n$ python3 check_docker_hub_limit.py\nOK - Docker Hub: Limit is 5000 remaining 4997|'limit'=5000 'remaining'=4997\n\n$ echo $?\n0\n```\n\nSpecify the warning threshold with `10000` pulls, and the critical threshold with `3000`.\nThe example shows how the state changes to `WARNING` with a current count of `4999` remaining\npull requests. The plugin script exit code changes to `1`.\n\n```\n$ python3 check_docker_hub_limit.py -w 10000 -c 3000\nWARNING - Docker Hub: Limit is 5000 remaining 4999|'limit'=5000 'remaining'=4999\n\n$ echo $?\n1\n```\n\nSpecify a higher critical threshold with `5000`. When the remaining count goes below this value,\nthe plugin script returns `CRITICAL` and changes the exit state into `2`.\n\n```\n$ python3 check_docker_hub_limit.py -w 10000 -c 5000\nCRITICAL - Docker Hub: Limit is 5000 remaining 4998|'limit'=5000 'remaining'=4998\n\n$ echo $?\n2\n```\n\nWhen a timeout is reached, or another error is thrown, the exit state switches to `3` and the output state becomes `UNKNOWN`.\n\n### Use a Prometheus exporter for rate limit metrics\n\n[Prometheus](https://prometheus.io/) scrapes metrics from HTTP endpoints. There is a variety of exporters for Prometheus to monitor host systems, HTTP endpoints, containers, databases, etc. Prometheus provides [client libraries](https://prometheus.io/docs/instrumenting/clientlibs/) to make it easier to start writing your own custom exporter. The metrics need to be exported in a [defined format](https://prometheus.io/docs/instrumenting/exposition_formats/).\n\nThe Docker Hub limit values can be fetched with obtaining an authorization token first, and then sending a `HEAD` request shown above. The code algorithm follows the ideas of the monitoring plugin. Instead of printing the values onto the shell, the metric values are exposed with an HTTP server. 
The Prometheus client libraries provide this functionality built-in.\n\nWe have created a [Prometheus Exporter for Docker Hub Rate Limits](https://gitlab.com/gitlab-com/marketing/corporate_marketing/developer-evangelism/code/docker-hub-limit-exporter) using the [Python client library](https://github.com/prometheus/client_python). The repository provides a demo environment with `docker-compose` which starts the exporter, Prometheus and Grafana.\n\nEnsure that [docker-compose is installed](https://docs.docker.com/compose/install/) and clone/download the repository. Then run the following commands:\n\n```\n$ cd example/docker-compose\n\n$ docker-compose up -d\n```\n\nNavigate to `http://localhost:3030` to access Grafana and explore the demo environment with the pre-built dashboard.\n\n![Grafana dashboard for Docker Hub Limit Prometheus Exporter](https://about.gitlab.com/images/blogimages/docker-hub-limit-monitoring/grafana_prometheus_docker_hub_limit_exporter_demo.png){: .shadow.medium.center}\n\nGrafana dashboard for Docker Hub Limits\n{: .note.text-center}\n\n### More monitoring/observability ideas\n\nUse the steps explained in this blog post to add Docker Hub limit monitoring. Evaluate the Prometheus exporter or the check plugin, or create your own monitoring scripts. Fork the repositories and send a MR our way!\n\n* [check-docker-hub-limit for Nagios/Icinga/Zabbix/Sensu](https://gitlab.com/gitlab-com/marketing/corporate_marketing/developer-evangelism/code/check-docker-hub-limit)\n* [docker-hub-limit-exporter for Prometheus](https://gitlab.com/gitlab-com/marketing/corporate_marketing/developer-evangelism/code/docker-hub-limit-exporter)\n\nThe Prometheus exporter and the monitoring plugin script can help to see trends and calculate usage over time. 
Use your own local (GitLab) container registry or one of the available caching methods described in these blog posts:\n\n* [Cache Docker images in your CI/CD infrastructure](/blog/mitigating-the-impact-of-docker-hub-pull-requests-limits/). Use this resource for possible solutions around caching and proxying.\n* [Use the Dependency Proxy](/blog/minor-breaking-change-dependency-proxy/). Learn more about the GitLab Dependency Proxy being made open source in the future.\n* [#everyonecancontribute cafe: Docker Hub Rate Limit: Mitigation, Caching and Monitoring](https://everyonecancontribute.com/post/2020-11-04-cafe-7-docker-hub-rate-limit-monitoring/). This is a community meetup hosted by Developer Evangelists at GitLab. The blog post includes a video with more insights and discussion.\n\nPhoto by [Vidar Nordli-Mathisen](https://unsplash.com/@vidarnm) from [Unsplash](https://www.unsplash.com).\n{: .note}\n",[9,563,859,731,1103],"production",{"slug":1105,"featured":6,"template":734},"docker-hub-rate-limit-monitoring","content:en-us:blog:docker-hub-rate-limit-monitoring.yml","Docker Hub Rate Limit Monitoring","en-us/blog/docker-hub-rate-limit-monitoring.yml","en-us/blog/docker-hub-rate-limit-monitoring",{"_path":1111,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1112,"content":1118,"config":1125,"_id":1127,"_type":14,"title":1128,"_source":16,"_file":1129,"_stem":1130,"_extension":19},"/en-us/blog/dotfiles-document-and-automate-your-macbook-setup",{"title":1113,"description":1114,"ogTitle":1113,"ogDescription":1114,"noIndex":6,"ogImage":1115,"ogUrl":1116,"ogSiteName":720,"ogType":721,"canonicalUrls":1116,"schema":1117},"dotfiles - Document and automate your Macbook setup","Document and automate your Macbook setup with installing tools and well-known configuration settings. 
Follow best practices from a developer’s point of view.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664102/Blog/Hero%20Images/gitlab-values-cover.png","https://about.gitlab.com/blog/dotfiles-document-and-automate-your-macbook-setup","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"dotfiles - Document and automate your Macbook setup\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2020-04-17\",\n      }",{"title":1113,"description":1114,"authors":1119,"heroImage":1115,"date":1120,"body":1121,"category":1122,"tags":1123},[1099],"2020-04-17","\n\n{::options parse_block_html=\"true\" /}\n\n\n\n## What are dotfiles?\n\nWhat are dotfiles? Put simply, a dotfile is a file that starts with a dot (.), which differentiates it from other file types. Beyond that, a dotfile is a way for software developers to be more productive. Dotfiles are configuration files that help a variety of programs manage their functionality. Dotfiles are hidden by default in a Unix-based system. \n\nDotfiles can be used to set environment variables, define shell aliases, configure editors and other command-line tools, and much more. They are typically stored in the home directory of a user and are loaded when the user logs in, or when a new shell is opened.\n\nSome examples of dotfiles you may have encountered before (or not) include:\n\n* .vimrc\n* .bash_profile or .bashrc \n* .config\n* .gitconfig\n* .zshrc\n\n## Why use dotfiles?\n\nWhy use dotfiles? When I first started working on Windows, Linux and macOS many years ago, there was this steep learning curve. Not everything on the system had a good default for improved productivity, often tools were missing to create your own work environment.\n\nAt first glance, I started writing blog posts and shared my findings with everyone. 
This had the benefit that I exactly knew where these snippets can be found. Many years ago, systems were not so reliable and often a full operating system reinstall worked better than troubleshooting existing problems.\n\nAfter some years, many best practices had been documented in blog posts and were hard to apply in one go. Especially with working in many areas for development, professional services, ops and support, different settings and tools have been applied. At that point I had created a wiki page with text and screenshots.\n\nThe wiki page allowed for many revisions, and was kept in the internal Confluence since it contained company sensitive information. I often was asked how my Linux and macOS setup looks like and if I could share best practices.\n\nThen I learned that users share their configuration and setup commands in a Git repository which is called “dotfiles”. At first glance, a dotfile is a hidden file on a Linux/Unix system, containing tool specific settings. Common tools are Git, vim, bash storing their configuration in a dotfile in the user’s home directory.\nThese dotfiles were enriched with scripts to install additional software via package managers, and to apply certain runtime configuration after the work environment was setup the first time.\n\n## dotfiles on my Macbook\n\nMy first steps with dotfiles started at the point where updating the Confluence wiki became troublesome. It also was the only left bastion where I could not write documentation in Markdown. Within the dotfiles Git repository, best practice always has been a `README.md` file which describes more details and tips. Copying snippets and scripts for everyone also has become easier. Users can even fork the repository and use this as a basis for their own work environments.\n\nNavigate into my [dotfiles repository](https://gitlab.com/dnsmichi/dotfiles) to learn more. 
The following sections highlight some of my daily best practices as a Developer Evangelist at GitLab.\n\n\n### ZSH with OhMyZSH\n\nmacOS Catalina uses ZSH by default. Therefore I was looking into best practices and soon learned about OhMyZSH as shell framework. It comes with nice plugins which provide a native Git shell integration as well as exporting credentials as environment variables.\n\nIn addition to the built-in functionality, it is easy to add custom aliases and settings. Avoid clutter inside the main `.zshrc` file, instead extend `.oh-my-zsh/custom/aliases.zsh` for example. The following alias allows to clean up the local git branches very easily:\n\n```sh\n# Delete all remote tracking Git branches where the upstream branch has been deleted\nalias git_prune=\"git fetch --prune && git branch -vv | grep 'origin/.*: gone]' | awk '{print \\$1}' | xargs git branch -d\"\n```\n### Git configuration\n\nMy Git configuration is rather simple. The reason I keep it inside the dotfiles repository is that I always forget about the Git CLI commands to set them properly. Over the years, all my preferred customizations have been applied in Git itself. The only minimalistic addons are a custom `git log` command and the credential helper setting. I need that for storing the GitLab personal token and not being asked by `git push/pull` all the time.\n\n```sh\n$ cat ~/.gitconfig\n\n[user]\n\tname = Michael Friedrich\n\temail = mfriedrich@gitlab.com\n        signingkey = D14A1F16\n[alias]\n        l  = log --graph --pretty=format:'%Cred%h%Creset %C(yellow)%d%Creset %s %Cgreen(%cr) %C(bold blue)\u003C%an>%Creset' --abbrev-commit --date=relative --\n[credential]\n\thelper = osxkeychain\n```\n\nThe Git shell prompt integration is done with OhMyZSH as plugin. Previously I had done this with a customized Bash profile. I decided to go for ZSH with macOS Catalina and my new job at GitLab. 
\n\n### vim configuration\n\nEven with habits changed to using VS Code/Atom or the Web IDE, I am still very fast with vim, especially with spell checking included. I've been carrying this `.vimrc` file around for more than 10 years now and it always helped :) Here is the best of - there is more inside with syntax highlighting and restoring the cursor on re-open.\n\n\n```sh\nset viminfo='20,\u003C1000,s1000,:20,%,n~/.viminfo “ Increase buffer size for search\nset history=50            \" keep 50 lines of command line history\nset ruler                      \" show the cursor position all the time\nset nofixeol                 \" don't fix end-of-line errors\n\"set background=dark\nset background=light\nset showcmd              \" Show (partial) command in status line.\nset showmatch           \" Show matching brackets.\n```\n\n\n### Install favorite software and tools\n\nHomebrew is my favorite package manager on macOS. It ranges from adding simple tools like “htop” to applications not available in the app store like “Gimp”.\n\nIn order to keep things simple, the `brew_once.sh` script takes care of installing Homebrew, enabling the cask system for applications and installs wget/curl to setup OhMyZsh.\n\n```sh\n./brew_once.sh\n```\n\nAdditional software is installed with the “`brew.sh` script. This can be re-run any time to ensure the same state is applied. This will be useful if an application was accidentally removed. Please fork the repository and adjust everything for your likings!\n\n```sh\n./brew.sh\n```\n\nThe script takes care of installing the following:\n\n* GNU utils (sed, awk, tar, sha256sum). 
The macOS utils differ a bit and may break scripts made for Linux.\n* System tools (htop, pidof, pstree, grep, screen, nmap, ssh-copy-id, tree)\n* Images (imagemagick) for easy conversions\n* Archive and Git (git, git-lfs, p7zip, xz, tig, hub)\n* JSON (jq, jo)\n* Development (Ruby, Python, Go, Redis)\n* Applications as cask (Java, Atom, VS Code, Firefox, VLC, Gimp, Vagrant)\n\n\n### Enhanced Workflows with Alfred\n\n[Alfred](https://www.alfredapp.com/) is not only a Spotlight replacement for opening applications or searching files. The major selling points are custom workflows accessible by quick typing. Picking HTML color codes, querying DNS records or copying emojis all over the place have become easier with quick shortcuts.\n\nTired of locking your screen on macOS? `Cmd+Space, l, Enter` after Alfred has learned that `l` means `lock`. Yep, it remembers the most often typed commands.\n\n![Alfred Workflow Dig GitLab.com IPv6](https://about.gitlab.com/images/blogimages/alfred_workflow_dig_gitlab.com_aaaa.png){: .shadow}\n\n\n## Trackpad and keyboard\n\nI always used to have a mouse with me for my previous notebooks, and the change to the trackpad always worried me. Move the mouse, click here and on it goes. When I got my first iPad, I got introduced to just tapping and approving. \n\nI’ve learned that this setting can be enabled on macOS too for the trackpad which is my preferred input method since then. In order to automate this setting, run the following CLI commands:\n\n```sh\ndefaults write com.apple.driver.AppleBluetoothMultitouch.trackpad Clicking -bool true\ndefaults -currentHost write NSGlobalDomain com.apple.mouse.tapBehavior -int 1\ndefaults write NSGlobalDomain com.apple.mouse.tapBehavior -int 1\n```\n\nAnother thing I prefer using - the right click for accessing the actions toolbar. By default, the secondary click is available with two fingers. My improved workflow prefers the right bottom corner of the trackpad. 
You can either change this inside the trackpad preferences or run these CLI commands:\n\n```sh\ndefaults write com.apple.driver.AppleBluetoothMultitouch.trackpad TrackpadCornerSecondaryClick -int 2\ndefaults write com.apple.driver.AppleBluetoothMultitouch.trackpad TrackpadRightClick -bool true\ndefaults -currentHost write NSGlobalDomain com.apple.trackpad.trackpadCornerClickBehavior -int 1\ndefaults -currentHost write NSGlobalDomain com.apple.trackpad.enableSecondaryClick -bool true\n```\n\nLast but not least using `tab` for changing the focus area in windows is a must have. This is hidden in the keyboard shortcut settings as `use keyboard navigation to move focus between controls`.\n\n```sh\ndefaults write NSGlobalDomain AppleKeyboardUIMode -int 3\n```\n\nAdditional ideas and commands can be found in [this repository](https://github.com/herrbischoff/awesome-macos-command-line).\n\n## Documentation\n\nEverything else is stored in the GitLab repository, including the documentation in Markdown format. Additional screenshots can be put there too into the `doc/images` directory for example.\nThere is no need to carry an extra wiki page around and everyone can access the repository via URL. You can also download the whole archive and work offline.\n\nThe best thing about my dotfiles repository - it is open source and we can learn, share, suggest together :-)\nDo you have a cool idea for making this even more convenient? 
Fork the [repository](https://gitlab.com/dnsmichi/dotfiles) and send a merge request please!\n","unfiltered",[9,9,1124,1124],"agile",{"slug":1126,"featured":6,"template":734},"dotfiles-document-and-automate-your-macbook-setup","content:en-us:blog:dotfiles-document-and-automate-your-macbook-setup.yml","Dotfiles Document And Automate Your Macbook Setup","en-us/blog/dotfiles-document-and-automate-your-macbook-setup.yml","en-us/blog/dotfiles-document-and-automate-your-macbook-setup",{"_path":1132,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1133,"content":1139,"config":1149,"_id":1151,"_type":14,"title":1152,"_source":16,"_file":1153,"_stem":1154,"_extension":19},"/en-us/blog/fast-python-flask-server-deployment-with-gitlab-google-cloud",{"title":1134,"description":1135,"ogTitle":1134,"ogDescription":1135,"noIndex":6,"ogImage":1136,"ogUrl":1137,"ogSiteName":720,"ogType":721,"canonicalUrls":1137,"schema":1138},"Fast Python Flask server deployment with GitLab + Google Cloud","This tutorial shows how to use GitLab’s Google Cloud integration to deploy a Python Flask server in less than 10 minutes, helping developers become more independent and efficient.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098427/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945_fJKX41PJHKCfSOWw4xQxm_1750098427691.png","https://about.gitlab.com/blog/fast-python-flask-server-deployment-with-gitlab-google-cloud","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Fast Python Flask server deployment with GitLab + Google Cloud\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Noah Ing\"},{\"@type\":\"Person\",\"name\":\"Jerez Solis\"}],\n        \"datePublished\": \"2024-11-04\",\n      }",{"title":1134,"description":1135,"authors":1140,"heroImage":1136,"date":1143,"body":1144,"category":752,"tags":1145},[1141,1142],"Noah Ing","Jerez Solis","2024-11-04","Deploying an 
application to the cloud often requires assistance from\nproduction or DevOps engineers. GitLab's Google Cloud integration empowers\ndevelopers to handle deployments independently. In this tutorial, you'll\nlearn how to deploy a Python Flask server to Google Cloud in less than 10\nminutes. Whether you’re a solo developer or part of a large team, this setup\nallows you to deploy applications efficiently.\n\n\nYou'll learn how to:\n\n\n- Create a new project in GitLab\n\n- Create a Flask server utilizing `main.py`\n\n- Utilize the Google Cloud integration to create a Service account\n\n- Utilize the Google Cloud integration to create Cloud Run via a merge\nrequest\n\n- Access your newly deployed Flask server\n\n- Clean up your environment\n\n\n## Prerequisites:\n\n- Owner access on a Google Cloud Platform project\n\n- Working knowledge of Python\n\n- Working knowledge of GitLab CI\n\n- 10 minutes\n\n\n## Step-by-step Python Flask server deployment to Google Cloud\n\n\n**1. Create a new project in GitLab.**\n\n\nWe decided to call our project \"python-flask-cloud-run\" for simplicity.\n\n\n![python flask server - create a new project in\nGitLab](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098438/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750098438036.png)\n\n\n**2. Create a flask server utilizing main.py demo.**\n\n\nFind the `main.py` demo here:\n[https://gitlab.com/demos/applications/python-flask-cloud-run](https://gitlab.com/demos/applications/python-flask-cloud-run).\n\n\n```python\n\nimport os\n\n\nfrom flask import Flask\n\n\napp = Flask(__name__)\n\n\n@app.route(\"/\")\n\ndef hello_world():\n    \"\"\"Example Hello World route.\"\"\"\n    name = os.environ.get(\"NAME\", \"World\")\n    return f\"Hello {name}!\"\n\nif __name__ == \"__main__\":\n    app.run(debug=True, host=\"0.0.0.0\", port=int(os.environ.get(\"PORT\", 8080)))\n```\n\n\n**3. 
Create a `requirements.txt` with the following dependencies.**\n\n\n```\n\nFlask==3.0.3\n\ngunicorn==22.0.0\n\nWerkzeug==3.0.3\n\n```\n\n\n**4. Utilizing the Google Cloud integration, create a Service account.**\n\n\nNavigate to **Operate > Google Cloud > Create Service account**.\n\n\n![python flask server - create service\naccount](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098438/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750098438037.png)\n\n\n**5. Also configure the region you would like the Cloud Run instance to\ndeploy to.**\n\n\n![python flask server - configure the\nregion](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098438/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098438038.png)\n\n\n**6. Utilizing the Google Cloud integration, configure Cloud Run via merge\nrequest.**\n\n\n![python flask server -\ndeployments](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098438/Blog/Content%20Images/Blog/Content%20Images/image7_aHR0cHM6_1750098438041.png)\n\n\n**7. This will open a merge request. Immediately merge this merge request.**\n\n\n![python flask server - enable deployments to Cloud\nRun](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098438/Blog/Content%20Images/Blog/Content%20Images/image10_aHR0cHM6_1750098438043.png)\n\n\n**Note:** `GCP_PROJECT_ID`, `GCP_REGION`, `GCP_SERVICE_ACCOUNT`,\n`GCP_SERVICE_ACCOUNT_KEY` will all be automatically populated from the\nprevious steps.\n\n\n![python flask server -\nvariables](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098438/Blog/Content%20Images/Blog/Content%20Images/image9_aHR0cHM6_1750098438044.png)\n\n\n**8. Voila! 
Check your pipeline and you will see you have successfully\ndeployed to Google Cloud Run utilizing GitLab CI.**\n\n\n![python flask server - update\ndockerfile](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098438/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750098438045.png)\n\n\n\u003Cbr>\u003C/br>\n\n\n![python flask server -\ndockerfile](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098438/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098438046.png)\n\n\n**9. Click the Service URL to view your newly deployed Flask server.**\n\n\nNavigate to **Operate > Environments** to see a list of deployments for your\nenvironments.\n\n\n![python flask server - deployments\nlist](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098438/Blog/Content%20Images/Blog/Content%20Images/image8_aHR0cHM6_1750098438047.png)\n\n\nBy clicking on the environment called **main**, you’ll be able to view a\ncomplete list of deployments specific to that environment.\n\n\n![python flask server - main job\nlisting](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098438/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750098438048.png)\n\n\n## Next steps\n\n\nTo get started with developing your Flask application, try adding another\nendpoint. For instance, in your `main.py` file, you can add a **/bye**\nendpoint as shown below:\n\n\n```\n\n@app.route(\"/\")\n\ndef hello_world():\n    \"\"\"Example Hello World route.\"\"\"\n    name = os.environ.get(\"NAME\", \"World\")\n    return f\"Hello {name}!\"\n\n```\n\n\nPush the changes to the repo, and watch the `deploy-to-cloud-run` job deploy\nthe updates. 
Once it’s complete, go back to the Service URL and navigate to\nthe **/bye** endpoint to see the new functionality in action.\n\n\n## Clean up\n\n\nTo prevent incurring charges on your Google Cloud account for the resources\nused in this tutorial, you can either delete the specific resources or\ndelete the entire Google Cloud project. For detailed instructions, refer to\nthe [cleanup\nguide](https://docs.gitlab.com/ee/tutorials/create_and_deploy_web_service_with_google_cloud_run_component/#clean-up).\n\n\n> For more DevSecOps capabilities, [start a free trial of GitLab\nUltimate and GitLab\nDuo](https://gitlab.com/-/trials/new?glm_content=default-saas-trial&glm_source=about.gitlab.com/blog/%2F).\n",[1146,9,1147,1148],"tutorial","google","solutions architecture",{"slug":1150,"featured":91,"template":734},"fast-python-flask-server-deployment-with-gitlab-google-cloud","content:en-us:blog:fast-python-flask-server-deployment-with-gitlab-google-cloud.yml","Fast Python Flask Server Deployment With Gitlab Google Cloud","en-us/blog/fast-python-flask-server-deployment-with-gitlab-google-cloud.yml","en-us/blog/fast-python-flask-server-deployment-with-gitlab-google-cloud",{"_path":1156,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1157,"content":1163,"config":1169,"_id":1171,"_type":14,"title":1172,"_source":16,"_file":1173,"_stem":1174,"_extension":19},"/en-us/blog/five-ways-to-streamline-cloud-adoption",{"title":1158,"description":1159,"ogTitle":1158,"ogDescription":1159,"noIndex":6,"ogImage":1160,"ogUrl":1161,"ogSiteName":720,"ogType":721,"canonicalUrls":1161,"schema":1162},"5 ways to streamline your cloud adoption","As companies migrate to the cloud, consider these helpful tips for making the move smoother and more efficient.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663930/Blog/Hero%20Images/daytime-clouds_1800x945.png","https://about.gitlab.com/blog/five-ways-to-streamline-cloud-adoption","\n                        {\n        \"@context\": 
\"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"5 ways to streamline your cloud adoption\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sharon Gaudin\"}],\n        \"datePublished\": \"2023-09-05\"\n      }",{"title":1158,"description":1159,"authors":1164,"heroImage":1160,"date":1166,"body":1167,"category":576,"tags":1168},[1165],"Sharon Gaudin","2023-09-05","\nMoving to the cloud makes sense to a lot of companies — it’s getting there that can be difficult.\n\n[GitLab’s 2023 Global DevSecOps Survey](https://about.gitlab.com/developer-survey/) showed that migrating to the cloud can help organizations release software faster: Respondents who were running at least 25% of their applications in the cloud were twice as likely to release software faster than they were a year ago.\n\nHowever, the migration, whether to a single-cloud service or a multi-cloud environment, can be a big lift. IT teams are tasked with securing major data stores and workloads, navigating the complexities of moving legacy applications, and ensuring that cloud environments comply with applicable data regulations and laws. It can be complicated, with a lot of moving pieces that are often difficult to track.\n\nAnd the longer a migration drags on, the more things can go wrong and the more expensive it can get. It only makes sense to look for a way to make something so critical to the business easier, faster, and less expensive.\n\nAbubakar Siddiq Ango, developer evangelism program manager at GitLab, and Fatima Sarah Khalid, developer evangelist at GitLab, share five ways organizations can alleviate some of the time-consuming, repetitive, and arduous tasks it takes to successfully make that move.\n\n## 1. 
Take care of your data\nOne of the most difficult parts of a cloud migration is moving the data itself – especially if it’s complex and stored across multiple systems – but there are a few ways you can organize and streamline the tasks involved to make them more straightforward. For example, to save time and increase efficiency, Khalid notes that team members can create [issues](https://docs.gitlab.com/ee/user/project/issues/), break tasks down into [milestones](/blog/tackle-nists-plan-of-action-and-milestones-with-gitlabs-risk-management-features/), and use the [Roadmap](https://docs.gitlab.com/ee/user/group/roadmap/) feature, which gives teams a more granular view of their workflow.\n\n## 2. Avoid security pitfalls\n[Security](/blog/its-time-to-put-the-sec-in-devsecops/) should be a key consideration in any cloud migration. Moving to a cloud environment can inadvertently cause misconfigured servers, unsecure APIs, compliance infringements, and data loss. Any of these problems can trip up cloud migration efforts and expose the company to risk.\n\nTo ensure the move to the cloud proceeds smoothly while minimizing security risks, Ango says teams can use [container](https://docs.gitlab.com/ee/user/application_security/container_scanning/) and [dependency scanning](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/) and [static application security testing](https://docs.gitlab.com/ee/user/application_security/sast/) (SAST) to identify and remediate known vulnerabilities in container images, dependencies, and source code. Teams also can use features such as [code quality](https://docs.gitlab.com/ee/ci/testing/code_quality.html) analysis to supplement existing code review processes and ensure that the project’s code is simple, high-quality, and straightforward to maintain — and, therefore, less likely to cause issues during the migration.\n\n## 3. 
Automate compliance\n[Compliance](/blog/top-5-compliance-features-to-leverage-in-gitlab/) is another critical issue. IT teams need to ensure the new cloud environment continues to meet all of the organization's regulatory requirements — a potentially large number of standards. That means making sure processes and safeguards focused on data protection are in place and cover the information and applications being moved to the cloud. Manually, that can involve spreadsheets, seemingly endless checklists, and cross-functional teams of people culling through data. Automation makes this more streamlined, requires far fewer people to navigate the process, and is simpler to manage. Automated DevOps practices, like security scanning, [policy automation](/solutions/compliance/), and making compliance standards part of the CI/CD pipeline, all act as guardrails to [keep an organization’s compliance needs on track](/blog/the-importance-of-compliance-in-devops/). With these tools at hand, team members can trust that when they create compliance frameworks and policies, the associated rules will be automatically deployed and enforced throughout the software development lifecycle.\n\n## 4. Relieve configuration challenges\nSetting up and configuring a cloud platform can be a time-consuming and complicated job, but [CI/CD capabilities](/blog/introducing-ci-components/) help automate the configuration process, says Ango. With CI templates, teams can build and deploy applications to different cloud providers or installation targets without having to write their own CI script every time. For instance, [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/), a collection of pre-configured features and integrations, uses CI/CD templates to handle deployments on each different cloud environment.\n\nThe [GitLab agent for Kubernetes](https://docs.gitlab.com/ee/user/clusters/agent/install/) also can offer integration capabilities for different cloud providers and services. 
The agent, which helps set up GitOps, automatically deploys workloads to Kubernetes clusters. Any time new changes are made, it pulls them in and deploys them into a cluster.\nAlso, teams can use [GitLab and Terraform for infrastructure as code](https://docs.gitlab.com/ee/user/infrastructure/iac/), removing the complexities of making configuration changes repeatable, traceable, and more scalable, which is essential for cloud environments.\n\n## 5. Go multi-cloud\nWhile some companies are making initial moves to the cloud, others are expanding from a single cloud to a multi-cloud environment. This strategy enables organizations to run different workloads on different cloud platforms. Being cloud agnostic means they can use the same development tools and internal processes, and then choose where they want to have their workloads run based on their business needs. Problems can arise, though, when IT teams turn to vendor-locked, cloud native developer tools, which are tailored to their own services and might, or might not, support other cloud environments. Using different tools for each cloud platform isn’t efficient, so it’s key to find tools that are cloud or provider agnostic.\n\n## Uncomplicate cloud migration with a DevSecOps platform\nYes, there are different ways to ease a cloud migration – but do teams have to go out and round up a dozen different tools to ensure their migration is fast, secure, and compliant? No, they don't.\n\n“A lot of teams are realizing that having a single, unified place to simplify, automate, and manage the process of setting up or migrating to the cloud is a game changer,” says Khalid. 
“With an end-to-end [DevSecOps platform](/blog/utilize-the-gitlab-devops-platform-to-avoid-cloud-migration-hazards/), users are able to deploy to any of the common public clouds; support collaboration through features like merge requests, code reviews, and issue tracking; support integrations with a variety of third-party tools; and have built-in security features that allow teams to meet their needs.”\n\nTaking advantage of the GitLab DevSecOps Platform can uncomplicate a lot of those adoption challenges. And GitLab works with any cloud provider.\n\n“I know when people think about the GitLab platform, they focus on security, source code management, and [collaboration](/blog/5-ways-collaboration-boosts-productivity-and-your-career/). But we also really should be thinking about how it’s a tool that helps organizations get their [workload to the cloud](/blog/shifting-from-on-prem-to-cloud/),” says Ango. “You have to be able to work fast, move fast and deploy fast on whatever cloud environment you need, and do it all securely. That is what GitLab offers. 
That is a big deal.”\n\n_To find the features — all in one place — that your organization needs to ease and speed a cloud migration, check out this [free trial of GitLab Ultimate](https://about.gitlab.com/free-trial/devsecops/)._\n",[573,9,1021,1062],{"slug":1170,"featured":6,"template":734},"five-ways-to-streamline-cloud-adoption","content:en-us:blog:five-ways-to-streamline-cloud-adoption.yml","Five Ways To Streamline Cloud Adoption","en-us/blog/five-ways-to-streamline-cloud-adoption.yml","en-us/blog/five-ways-to-streamline-cloud-adoption",{"_path":1176,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1177,"content":1183,"config":1189,"_id":1191,"_type":14,"title":1192,"_source":16,"_file":1193,"_stem":1194,"_extension":19},"/en-us/blog/fluentd-using-gitlab-ci-cd",{"title":1178,"description":1179,"ogTitle":1178,"ogDescription":1179,"noIndex":6,"ogImage":1180,"ogUrl":1181,"ogSiteName":720,"ogType":721,"canonicalUrls":1181,"schema":1182},"Thanks Fluentd for betting on GitLab CI/CD!","We're happy to support fresh CNCF graduate Fluentd with GitLab CI/CD, and excited about their latest innovation offering stream processing on the edge.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678614/Blog/Hero%20Images/gitlab-fluentd.png","https://about.gitlab.com/blog/fluentd-using-gitlab-ci-cd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Thanks Fluentd for betting on GitLab CI/CD!\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Priyanka Sharma\"}],\n        \"datePublished\": \"2019-05-21\",\n      }",{"title":1178,"description":1179,"authors":1184,"heroImage":1180,"date":1185,"body":1186,"category":729,"tags":1187},[1079],"2019-05-21","\nFluentd, the [latest project to graduate](https://www.fluentd.org/blog/fluentd-cncf-graduation) in the CNCF, announced on stage at KubeCon Barcelona today that it is using [GitLab CI/CD](/solutions/continuous-integration/) 
for continuous integration. We are thrilled about the shout out and honored to support such an influential and innovative project.\n\nFor those who haven’t yet worked with Fluentd, it is an [open source data collector](https://www.fluentd.org/architecture), which lets you unify the data collection and consumption for a better use and understanding of data. Fluent Bit is their lighter-weight forwarder for those with exacting memory requirements. The project sports 7,868 stars on GitHub and their community has contributed more than 900 contributed plugins. They witness more than 100K downloads a day!\n\nThe latest innovation from Fluentd around [stream processing on the edge](https://docs.fluentbit.io/stream-processing/) can be very useful for our industry. As many of those who monitor large-scale, complex, distributed systems, run IoT businesses, or build smart cities will attest, more and more data is generated by these systems and analysis often needs to happen blazingly fast to be meaningful. The standard data analysis model, where it is first stored and indexed in a database (presumably in some cloud) and then analyzed, is not good enough for some real-time and complex analysis needs. The latencies associated with such data transfer may not be able to support applications involving time-critical, data-driven decision making. With Fluent bit, the Fluent team is looking to process the data while it's still in motion in the Log processor – bringing a lot of advantages of speed.\n\nWhile I am reading papers by others attempting to build stream processing on the edge, I find Fluentd’s efforts exciting because they already have major community traction and are part of companies’ observability workflows for logging. 
The [CNCF graduation criteria](https://github.com/cncf/toc/blob/master/process/graduation_criteria.adoc) that Fluentd met will further embolden enterprises to try it out, as part of the requirements are a diverse contributor community and security audits.\n\nWe've spent the past few months collaborating with Fluentd on their CI needs, and it's been very educational for us. We learned about the unique challenges that fast-moving projects in the CNCF face, and how we can be of assistance with our CI/CD offering. A large part of the answer is providing clear and consistent guidance around converting pipelines and then supporting the projects to success. If you are a CNCF project interested in working with GitLab CI/CD, holler at us and we’d be delighted to help.\n\nUntil then, enjoy KubeCon Barca!\n",[109,731,1188,9,278,859],"demo",{"slug":1190,"featured":6,"template":734},"fluentd-using-gitlab-ci-cd","content:en-us:blog:fluentd-using-gitlab-ci-cd.yml","Fluentd Using Gitlab Ci Cd","en-us/blog/fluentd-using-gitlab-ci-cd.yml","en-us/blog/fluentd-using-gitlab-ci-cd",{"_path":1196,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1197,"content":1203,"config":1209,"_id":1211,"_type":14,"title":1212,"_source":16,"_file":1213,"_stem":1214,"_extension":19},"/en-us/blog/from-idea-to-production-on-thousands-of-clouds",{"title":1198,"description":1199,"ogTitle":1198,"ogDescription":1199,"noIndex":6,"ogImage":1200,"ogUrl":1201,"ogSiteName":720,"ogType":721,"canonicalUrls":1201,"schema":1202},"From idea to production on thousands of clouds","Deliver cloud native applications in more places consistently at scale with GitLab and Gravity.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679266/Blog/Hero%20Images/blue-lights.jpg","https://about.gitlab.com/blog/from-idea-to-production-on-thousands-of-clouds","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"From idea to production on 
thousands of clouds\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ev Kontsevoy\"}],\n        \"datePublished\": \"2019-11-20\",\n      }",{"title":1198,"description":1199,"authors":1204,"heroImage":1200,"date":1206,"body":1207,"category":300,"tags":1208},[1205],"Ev Kontsevoy","2019-11-20","\nToday, deploying an application with GitLab is easier than ever: just create a Kubernetes cluster on your cloud of choice, connect it to GitLab with the Kubernetes integration, and Auto DevOps creates a full deployment pipeline for you.\n\nBut what if you need your app to run in two clusters in two separate regions? Ten clusters across multiple cloud providers? A hundred clusters and also on a fleet of self-driving trucks?\n\nAt [Gravitational](https://gravitational.com), we believe the future should not belong to a single cloud provider and developers should be able to run their applications anywhere with the same simplicity as having a single Kubernetes cluster.\n\nI am a huge fan of GitLab. I’ve had the great pleasure of getting to know much of the founding team [over the years](https://about.gitlab.com/blog/gitlab-joins-forces-with-gravitational/) and was happy to provide my [own contribution](https://gitlab.com/gitlab-org/gitlab-foss/issues/22864) to the community a while back. Today, I’m happy to share some thoughts on how to build with GitLab and deploy applications into dozens or even hundreds of cloud environments. \n\n## The rise of multicloud\n\nHow do you run applications in different data centers? Do you need to rewrite them from scratch? AWS may still be the dominant cloud provider, but cloud competitors are eating into their lead. It’s not just the big public cloud companies either. 
[Private cloud data centers](https://www.forbes.com/sites/jasonbloomberg/2019/02/02/have-private-clouds-finally-found-their-place-in-the-enterprise/#2f859685604f) are growing just as rapidly.\n\nMany companies that need to meet tough security and compliance requirements will require applications to run in their bare metal data centers. Running an application on an on-premises or even air-gapped data center adds additional complexity due to the hundreds or even thousands of dependencies in modern applications.\n\nGravitational has built Gravity, an open source [Kubernetes packaging solution ](https://gravitational.com/gravity/)that allows developers to build “cluster images” (similar to VM images) that can contain an entire Kubernetes cluster pre-loaded with multiple applications. You would use GitLab to go from idea to production, and Gravity to expand your production to anywhere in the world. \n\nStatements like, “I have snapshotted our entire production environment and emailed it to you, so you can run it in your private data center,” will not seem completely crazy.\n\nGravity uses standard, upstream CNCF-supported tooling for creating \"images\" of Kubernetes clusters containing the applications and their dependencies. The resulting files are called cluster images which are just .tar files.\n\nA cluster image can be used to recreate full replicas of the original environments for any deployment environment where compliance and consistency matter, i.e. in locked-down AWS/GCP/Azure environments or even in air-gapped server rooms. 
Each image includes all dependencies to spin up a full cluster, as well as the Gravity daemon that handles the most common operational tasks associated with Kubernetes applications, and it monitors and alerts human operators of problems.\n\n## Deploy with GitLab, scale with Gravity\n\n![Gravity dashboard](https://about.gitlab.com/images/blogimages/gravity-dashboard.png)\n\nDevelopers can leverage a GitLab repository as a single source of truth for rolling out a Kubernetes app and leverage [GitLab CI/CD](https://docs.gitlab.com/ee/ci/) for continuous delivery.\n\nAny project of meaningful scale begins by defining an [epic](https://docs.gitlab.com/ee/user/group/epics/) with goals, milestones, and tasks. An [issue](https://docs.gitlab.com/ee/user/project/issues/#issues) is the main object for collaborating on ideas and planning work. GitLab’s [package and container registry](https://about.gitlab.com/stages-devops-lifecycle/package/) helps you manage and package dependencies. \n\n[The GitLab Kubernetes integration](https://docs.gitlab.com/ee/user/project/clusters/) allows customers to create Kubernetes clusters, utilize review apps, run pipelines, use web terminals, deploy apps, view pod logs, detect and monitor Kubernetes, and much more. For deploying a Kubernetes cluster in a single destination, GitLab provides everything you need from start to finish. \n\nHowever, if your customers need to run your application in their private data centers, they can use Gravity, which essentially copy/pastes the entire Kubernetes cluster environment you’ve built in GitLab. \n\n[Download](https://gravitational.com/gravity/download/) and set up the Gravity open source edition following our [quickstart guide](https://gravitational.com/gravity/docs/quickstart/). From Gravity, you can build a cluster image of your Kubernetes application. 
Gravity’s [documentation](https://gravitational.com/gravity/docs/overview/) will walk you through the steps required to build an image manifest that describes the image build, the installation process, and the system requirements for the cluster. \n\nYou can build empty Kubernetes cluster images to quickly create a large number of identical, production-ready Kubernetes clusters within an organization, or you can build a cluster image that also includes Kubernetes applications to distribute your application to third parties. \n\n## Next steps\n\nIf you want to learn more about working with Kubernetes, start with [Kubernetes 101](https://www.youtube.com/watch?v=rq4GZ_GybN8). You’ll learn how GitLab and Kubernetes interact at various touchpoints. And, if you’re looking for a way to port your applications to new environments, check out [Gravity](https://gravitational.com/gravity). \n\n## About the guest author\n\nEv is a co-founder and the CEO of Gravitational. Before Gravitational, he launched the on-demand OpenCompute servers at Rackspace. Prior to Rackspace, he co-founded Mailgun, the first email service built for developers. Ev has been a fighter against unnecessary complexity in software for 20 years. He abhors cars but loves trains and open source software that doesn't require an army of consultants to operate.\n\n## About Gravitational\n\n[Gravitational](https://gravitational.com) helps companies deliver cloud applications across cloud providers, on-premises environments, and even air-gapped server rooms. Products include Teleport for multi-cloud privileged access management that doesn't get in the way of developer productivity, and Gravity, a Kubernetes packaging solution that takes the drama out of on-prem deployments. Gravitational was founded in 2015 and recently [announced their Series A](https://gravitational.com/blog/gravitational-series-a-funding/). 
\n\nCover image by [Sharon McCutcheon](https://unsplash.com/@sharonmccutcheon) on [Unsplash](https://unsplash.com/photos/TMwHpCrU8D4)\n",[9,563,232,859,109,755],{"slug":1210,"featured":6,"template":734},"from-idea-to-production-on-thousands-of-clouds","content:en-us:blog:from-idea-to-production-on-thousands-of-clouds.yml","From Idea To Production On Thousands Of Clouds","en-us/blog/from-idea-to-production-on-thousands-of-clouds.yml","en-us/blog/from-idea-to-production-on-thousands-of-clouds",{"_path":1216,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1217,"content":1223,"config":1230,"_id":1232,"_type":14,"title":1233,"_source":16,"_file":1234,"_stem":1235,"_extension":19},"/en-us/blog/gcp-move-update",{"title":1218,"description":1219,"ogTitle":1218,"ogDescription":1219,"noIndex":6,"ogImage":1220,"ogUrl":1221,"ogSiteName":720,"ogType":721,"canonicalUrls":1221,"schema":1222},"Update on our planned move from Azure to Google Cloud Platform","GitLab.com is migrating to Google Cloud Platform August 11 – here’s what this means for you now and in the future.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671280/Blog/Hero%20Images/gitlab-gke-integration-cover.png","https://about.gitlab.com/blog/gcp-move-update","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Update on our planned move from Azure to Google Cloud Platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David Smith\"}],\n        \"datePublished\": \"2018-07-19\",\n      }",{"title":1218,"description":1219,"authors":1224,"heroImage":1220,"date":1226,"body":1227,"category":752,"tags":1228},[1225],"David Smith","2018-07-19","\n\nNOTE to users in Crimea, Cuba, Iran, North Korea, Sudan, and Syria: GitLab.com may\nnot be accessible after the migration to Google. Google has informed us that\nthere are legal restrictions that are imposed for those countries. See this\n[U.S. 
Department of the Treasury link](http://www.treasury.gov/resource-center/sanctions/Programs/Pages/Programs.aspx)\nfor more details. At this time, we can only recommend that you download\nyour code or export relevant projects as a backup. See [this issue](https://gitlab.com/gitlab-com/migration/issues/649)\nfor more discussion.\n{: .alert .alert-warning}\n\nUpdate as of August 1: There will be a short maintenance window on Saturday, August 4 at 13:00 UTC. We will perform a test of approximately 1 hour.  This will help us verify some of our fixes to make sure the switchover goes as planned.\n{: .alert .alert-info}\n\nUpdate as of July 27: There will be a short maintenance window on Saturday, July 28 at 13:00 UTC. We will perform a short test of approximately 5 minutes.  This will help us verify some of our fixes to make sure our Chef runs work correctly with GitLab.com inaccessible.\n{: .alert .alert-info}\n\nUpdate as of July 24: Following our dry run of the migration on Saturday, July 21, we have rescheduled the migration with a new target date of Saturday, August 11. You can read through [our findings document](https://docs.google.com/document/d/1Y7Cv4BHmHw8djtDBex8opUGs8t0wWmgrueaCocKfYxs/edit?usp=sharing) for all the details.\n{: .alert .alert-info}\n\nImproving the performance and reliability of [GitLab.com](/pricing/)  has been a top priority for us. On this front we've made some incremental gains while we've been planning for a large change with the potential to net significant results: running GitLab as a [cloud native](/topics/cloud-native/) application on Kubernetes.\n\nThe next incremental step on our cloud native journey is a big one: migrating from Azure to Google Cloud Platform (GCP). While Azure has been a great provider for us, GCP has the best Kubernetes support and we believe will be the best provider for our long-term plans. 
In the short term, our users will see some immediate benefits once we cut over from Azure to GCP including encrypted data at rest by default and faster caching due to GCP's tight integration with our existing CDN.\n\n## Upcoming maintenance windows for the GCP migration\n\nAs an update to [our earlier blog post on the migration](/blog/moving-to-gcp/), this is a short post to let our community know we are planning on performing the migration of GitLab.com the weekend of ~~July 28~~ August 11 (this has been rescheduled following our dry run on July 21). We have a maintenance window coming up that we would like to make sure everybody knows about.\n\n### What you need to know:\n\nDuring the maintenance windows, the following services will be unavailable:\n\n* SaaS website ([GitLab.com](https://gitlab.com/) will be offline, but [about.gitlab.com](https://about.gitlab.com/) and [docs.gitlab.com](https://docs.gitlab.com/) will still be available)\n* Git ssh\n* Git https\n* registry\n* CI/CD\n* Pages\n\n### Maintenance window - Dry run - Saturday, July 21 at 13:00 UTC\n\nAs a further update to our testing, we are planning to take a short maintenance window this weekend on Saturday, July 21 at 13:00 UTC to do final readiness checks.\nThis maintenance window should last one hour.\n\n2018-07-23 UPDATE: Here are the [findings from the maintenance window](https://docs.google.com/document/d/1Y7Cv4BHmHw8djtDBex8opUGs8t0wWmgrueaCocKfYxs/edit). We've decided to push our target date from July 28th to August 11th to comfortably address several issues. We will likely do a small maintenance window on Saturday, July 28th, and another full practice on Saturday, August 4th.\n\n### Maintenance window - Short test - Saturday, July 28 at 13:00 UTC\n\nWe will perform a short test of approximately 5 minutes.  
This will help us verify some of our fixes to make sure our Chef runs work correctly with GitLab.com inaccessible.\n\n\n### Maintenance window - Dry run - Saturday, August 4 at 13:00 UTC\n\nWe will repeat the dry run exercise again to have a chance to verify our changes to the switchover plan.\n\n\n### Maintenance window - Actual switchover - Saturday, ~~July 28~~ August 11 at 10:00 UTC\n\nOn the day of the migration, we are planning to start at 10:00 UTC.  The time window for GitLab.com to be in maintenance is currently planned to be two hours.  Should any of these times change, we will be updating on the channels listed below. When this window is completed, GitLab.com will be running out of GCP.\n\n* [GitLab Status page](https://status.gitlab.com/)\n* [GitLab Status Twitter](https://twitter.com/gitlabstatus)\n\n### GitLab Pages and custom domains\n\nIf you have a custom domain on [GitLab Pages](https://docs.gitlab.com/ee/user/project/pages/):\n\n* We will have a proxy in place so you do not have to change your DNS immediately.\n* GitLab Pages will ultimately go to 35.185.44.232 after the ~~July 28~~ August 11 migration.\n* Do not change your DNS to this new address until we have successfully completed the migration.\n* We will post an update to our blog about when the cutoff will be for changing DNS from our Azure address to GCP for GitLab Pages.\n\nShould you need support during the migration, please reach out to [GitLab Support](https://about.gitlab.com/support/).\n\nWish us luck!\n",[1147,9,1229,859],"GKE",{"slug":1231,"featured":6,"template":734},"gcp-move-update","content:en-us:blog:gcp-move-update.yml","Gcp Move 
Update","en-us/blog/gcp-move-update.yml","en-us/blog/gcp-move-update",{"_path":1237,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1238,"content":1244,"config":1252,"_id":1254,"_type":14,"title":1255,"_source":16,"_file":1256,"_stem":1257,"_extension":19},"/en-us/blog/gitlab-achieves-kcsp-status",{"title":1239,"description":1240,"ogTitle":1239,"ogDescription":1240,"noIndex":6,"ogImage":1241,"ogUrl":1242,"ogSiteName":720,"ogType":721,"canonicalUrls":1242,"schema":1243},"GitLab achieves CNCF Kubernetes certified provider status","GitLab is all-in on cloud native and now that we're CNCF Certified Service Providers we'll be able to help other companies do the same.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681517/Blog/Hero%20Images/kubernetes-certified-service-provider-blog-cover.png","https://about.gitlab.com/blog/gitlab-achieves-kcsp-status","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"It's official: GitLab has achieved CNCF Kubernetes Certified Provider status\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vick Kelkar\"}],\n        \"datePublished\": \"2020-08-24\",\n      }",{"title":1245,"description":1240,"authors":1246,"heroImage":1241,"date":1248,"body":1249,"category":1250,"tags":1251},"It's official: GitLab has achieved CNCF Kubernetes Certified Provider status",[1247],"Vick Kelkar","2020-08-24","\n\nGitLab is pleased to announce that we are now a Kubernetes Certified Service Provider (KCSP). KCSP is run by the Cloud Native Computing Foundation (CNCF) in collaboration with the Linux Foundation. The intention behind the KCSP program is to ensure that enterprises get the support they need to roll out applications to production Kubernetes environments. 
GitLab, through its KCSP status, wants to help organizations to adopt a [cloud native](/topics/cloud-native/) approach for their business objectives.\n\n## Container and Kubernetes Adoption\n\nA recent [CNCF report](https://www.cncf.io/wp-content/uploads/2020/03/CNCF_Survey_Report.pdf) shows that the use of containers in production has jumped from 23% in 2016 to 84% in 2019. According to another [CNCF survey](https://www.cncf.io/blog/2019-cncf-survey-results-are-here-deployments-are-growing-in-size-and-speed-as-cloud-native-adoption-becomes-mainstream/), cloud native technologies have become mainstream and many [CNCF projects](https://cncf.ci/) have adopted GitLab for their project needs. Kubernetes has emerged as the orchestrator of choice for organizations embarking on cloud native initiatives. Kubernetes helps organizations achieve container operational efficiencies and make developer interactions easier with strong API support. A recent [survey of IT professionals](https://blogs.vmware.com/cloudnative/2020/03/11/why-large-organizations-trust-kubernetes/) working at organizations with 1,000 or more employees found that over 50% are running Kubernetes in a production environment. This is creating demand for people who understand how to migrate, deploy, and run containerized applications in a cloud native manner.\n\n## Benefits of GitLab achieving KCSP\n\nAccording to a [451 Research report](https://clients.451research.com/reportaction/98250/Toc), even as the adoption of Kubernetes gains traction in the enterprise and [DevOps](/topics/devops/) personnel leverage Kubernetes to automate tasks, there is still a skills gap around container administration and orchestration. GitLab, as a KCSP, can provide consulting, training, support, workshops, and professional services to enterprises looking to embrace the Kubernetes cloud native approach. 
A [survey](https://www.cncf.io/blog/introducing-the-cncf-technology-radar/) conducted by [CNCF End User Community](https://www.cncf.io/people/end-user-community/) shows that enterprise customers were willing to try out GitLab in their production environments. GitLab offers advice to enterprise users who want to run their applications on a container scheduler like Kubernetes. As [the CNCF CTO pointed out](https://www.patreon.com/posts/open-source-is-28808432), GitLab has an open core business model and the roadmaps are public. This allows our customers and community to contribute features back into the GitLab project. GitLab can provide guidance on GitOps, DevOps and DevSecOps approaches to organizations adopting Kubernetes.  Achieving KCSP status allows us to offer trusted advice to our customers and to help enterprises adopt Kubernetes for production workloads.\n\n## What’s next\n\nGitLab, being an open-source minded company, is committed to the success of Kubernetes as an open-source technology. Kubernetes is seeing wide adoption in the industry for scaling and management of containerized workloads. GitLab can help deliver workloads securely onto a Kubernetes cluster. You can run GitLab on Kubernetes using our [helm charts](https://docs.gitlab.com/charts/) as well. Achieving the KCSP milestone shows GitLab’s commitment to grow and support the Kubernetes project and the CNCF community.  \n\nTo learn more about the KCSP program and CNCF program, visit their respective websites at [KCSP](https://www.cncf.io/certification/kcsp/) and [CNCF](https://www.cncf.io/). GitLab believes in a world where everyone can contribute. Open source organizations can learn more about [GitLab for Open Source](/solutions/open-source/). 
You can learn more about GitLab's Kubernetes partners [here](/resources/downloads/gitlab-partnership-roadmap.pdf).\n","news",[859,9],{"slug":1253,"featured":6,"template":734},"gitlab-achieves-kcsp-status","content:en-us:blog:gitlab-achieves-kcsp-status.yml","Gitlab Achieves Kcsp Status","en-us/blog/gitlab-achieves-kcsp-status.yml","en-us/blog/gitlab-achieves-kcsp-status",{"_path":1259,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1260,"content":1266,"config":1272,"_id":1274,"_type":14,"title":1275,"_source":16,"_file":1276,"_stem":1277,"_extension":19},"/en-us/blog/gitlab-and-google-cloud",{"title":1261,"description":1262,"ogTitle":1261,"ogDescription":1262,"noIndex":6,"ogImage":1263,"ogUrl":1264,"ogSiteName":720,"ogType":721,"canonicalUrls":1264,"schema":1265},"How GitLab and Google Cloud drive innovation and efficiency for retailers","Learn how pairing DevSecOps with multicloud environments eases the development burden on retailers.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667457/Blog/Hero%20Images/open_source_program_blog_image.jpg","https://about.gitlab.com/blog/gitlab-and-google-cloud","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab and Google Cloud drive innovation and efficiency for retailers\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Regnard Raquedan\"}],\n        \"datePublished\": \"2023-03-08\",\n      }",{"title":1261,"description":1262,"authors":1267,"heroImage":1263,"date":1269,"body":1270,"category":576,"tags":1271},[1268],"Regnard Raquedan","2023-03-08","\nInnovation and growth can sometimes be at odds in the world of retail, especially when trying to develop, deploy, and manage modern applications across multicloud environments. 
GitLab and Google Cloud together help retailers create and secure software that scales along with their business.\n\nGitLab’s comprehensive [DevSecOps Platform](/the-source/platform/devops-teams-want-to-shake-off-diy-toolchains-a-platform-is-the-answer/) connects with Google Cloud’s [Distributed Cloud Edge](https://cloud.google.com/distributed-cloud/edge/latest/docs/overview) edge networking environment and [Anthos](https://cloud.google.com/anthos/docs/concepts/overview) cloud-centric container platform to provide retailers with enterprise-class features such as collaboration and planning, continuous integration ([CI](https://docs.gitlab.com/ee/ci/)), configuration management, and built-in security and compliance.\n\nGitLab enables development teams to streamline management of their distributed, hybrid environments right out of the gate. Retailers can utilize the following capabilities:\n\n* Agile planning and collaboration to ensure Anthos cloud container cluster configurations and policies are up to date and compliant with company standards.\n* Continuous integration to help develop quality code and configurations while simultaneously reducing code errors.\n* Configuration management to roll back to previously working Anthos states or configurations.\n* Native integration with Google Cloud to deploy software faster and more securely.\n\n## Why GitLab for retail?\n\nMulticloud environments are beneficial to retailers because they enable them to easily deploy and manage applications across a vast network of stores, warehouses, and the like. In addition, developing and hosting applications in the cloud provides more choice, faster delivery (i.e. time to market), real-time data access, and the ability to automatically scale resources (up or down). As more retailers look to cloud platforms to achieve these goals, GitLab is uniquely positioned to help them manage these environments in a way that keeps them agile, secure, and able to meet customer demands. 
\n\nGitLab’s DevSecOps Platform is geared toward helping retailers gain operational efficiencies throughout the software development lifecycle and across [multicloud environments](https://about.gitlab.com/topics/multicloud/) like Google Cloud. \n\nDevelopers at retailers can leverage the full range of features in GitLab’s DevSecOps Platform to build, test, deploy, and secure high-performance, low-latency business-critical applications, such as point of sale. \n\n[Google Distributed Cloud Edge](https://cloud.google.com/distributed-cloud/edge/latest/docs/overview) retail customers can use GitLab to manage their hybrid cloud policies, manage configurations, and administer [Anthos](https://cloud.google.com/anthos/docs) clusters. GitLab’s industry-leading DevSecOps Platform helps developers streamline in-store technology management processes and makes it easier for DevSecOps teams to collaborate. GitLab’s DevSecOps Platform also has built-in security and compliance to meet the unique auditing and reporting needs of retailers. \n\n## Use case: Automated deployment at scale\n\nRetail companies with multiple locations need technology that enables them to manage sprawling resources and maintain smooth operations, even when major changes are introduced. With GitLab’s DevSecOps Platform, retailers can automatically sync configurations and data across their Google Cloud, as well as other cloud and on-premises environments. This is critical for large retailers looking to scale hybrid Anthos clusters vertically across their network with Google Distributed Cloud Edge machines.\n\nGitLab also lets developers easily collaborate and make changes using Agile tools, Merge Requests, and requirements-based workflows. This creates a streamlined, audit-ready process that helps team members make decisions quickly.\n\nConfigurations are stored as YAML files in GitLab repositories, where teams can use a different repository per configuration state. 
Anthos Configuration Management then retrieves the appropriate configurations when network access is available, allowing for specific regional changes to be made.\n\nOnce changes are reconciled across regions, the new configurations are automatically applied and propagated to the correct Google Distributed Cloud Edge nodes. This secure, scalable process can be used at thousands of locations, decreasing the company's time to value and increasing ROI.\n\nHaving the right technology is a key driver of growth and innovation for retailers. Investing in technology and utilizing platforms like GitLab and Google Cloud can be a game changer for retailers looking to thrive in today's competitive market.\n",[9,1021,979,232],{"slug":1273,"featured":6,"template":734},"gitlab-and-google-cloud","content:en-us:blog:gitlab-and-google-cloud.yml","Gitlab And Google Cloud","en-us/blog/gitlab-and-google-cloud.yml","en-us/blog/gitlab-and-google-cloud",{"_path":1279,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1280,"content":1286,"config":1293,"_id":1295,"_type":14,"title":1296,"_source":16,"_file":1297,"_stem":1298,"_extension":19},"/en-us/blog/gitlab-and-oracle-partner-for-a-cloud-native-approach-to-modern-application-development",{"title":1281,"description":1282,"ogTitle":1281,"ogDescription":1282,"noIndex":6,"ogImage":1283,"ogUrl":1284,"ogSiteName":720,"ogType":721,"canonicalUrls":1284,"schema":1285},"Oracle and GitLab partner for cloud-native app development","Learn the benefits of deploying the DevOps platform on Oracle Cloud Infrastructure.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668514/Blog/Hero%20Images/multi-cloud-future.jpg","https://about.gitlab.com/blog/gitlab-and-oracle-partner-for-a-cloud-native-approach-to-modern-application-development","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab and Oracle partner for a cloud native approach to modern 
application development\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Creighton Swank\"},{\"@type\":\"Person\",\"name\":\"Vick Kelkar\"}],\n        \"datePublished\": \"2022-10-20\",\n      }",{"title":1287,"description":1282,"authors":1288,"heroImage":1283,"date":1290,"body":1291,"category":576,"tags":1292},"GitLab and Oracle partner for a cloud native approach to modern application development",[1289,1247],"Creighton Swank","2022-10-20","\nModern application development requires a cloud native platform that can operate in and across multiple cloud providers. GitLab has partnered with Oracle to enable customers to run GitLab’s DevOps platform on Oracle Cloud Infrastructure (OCI).\n\nWith OCI, organizations can accelerate migrations of existing enterprise workloads, deliver better reliability and performance for all applications, and offer the complete services customers need to build innovative cloud applications. With GitLab’s DevOps platform and OCI, businesses can create a resilient, high-performance DevOps environment. OCI also supports automatic operating system patching and zero trust architecture, which aligns with GitLab’s focus on [application security](/stages-devops-lifecycle/secure/).\n\n## The benefits of pairing GitLab and OCI\n\nPairing GitLab’s DevOps platform and OCI provides many benefits, including the following:\n\n- performance\n- platform breadth\n- security\n- value\n- hybrid and multi-cloud environments\n- GovCloud regions\n\n### Performance\n\nOCI provides a high-performance, resilient foundation for cloud services. Customers can quickly provision instances that feature the latest-generation processors via API, SDK, command line, Terraform, or the console. Workloads can scale up and/or out based on their requirements and compute-intensive workloads can leverage GPU shapes for hardware acceleration of AI/ML workloads. 
At the same time, GitLab runners can be configured to [leverage Nvidia GPUs](https://docs.gitlab.com/runner/configuration/gpus.html) for various executors to take advantage of GPUs and AI/ML workloads. \n\n### Platform breadth\n\nGitLab’s DevOps platform has the ability to integrate with Kubernetes services like OKE via GitLab Kubernetes agent. Leveraging GitLab’s Kubernetes agent will unlock [GitOps workflow](https://docs.gitlab.com/ee/user/clusters/agent/gitops.html) and [CI/CD workflow](https://docs.gitlab.com/ee/user/clusters/agent/ci_cd_workflow.html) for cloud native development. And the Oracle Cloud Infrastructure also offers a wide variety of platform services that allow customers to run workloads without having to manage infrastructure. Customers can run workloads on compute instances, in containers with Oracle Kubernetes Engine (OKE), or even as serverless functions. Services like object storage and events can be leveraged to build applications without managing infrastructure at all. For a complete list of these services, please click [here](https://docs.oracle.com/en-us/iaas/Content/services.htm). \n\n### Security\n\nThe second generation of OCI has been redesigned from the ground up to be a secure cloud. Oracle designed OCI architecture for security of the platform through isolated network virtualization, highly secure firmware installation, a controlled physical network, and network segmentation. GitLab’s DevOps platform is not only an OIDC provider but the platform integrates with other identity providers to support single sign-on capabilities. The platform’s [permission model](https://docs.gitlab.com/ee/user/permissions.html#instance-wide-user-permissions) follows similar approaches used by OCI around separation of concerns and role-based access to resources. \n\n### Value\n\nMission-critical and revenue-generating applications demand more than just availability from their cloud infrastructure. 
Mission-critical workloads also require consistent performance and the ability to manage, monitor, and modify resources running in the cloud at any time. OCI offers end-to-end SLAs covering performance, availability, and manageability of services. \n\nGitLab’s DevOps platform uses the same code base for the SaaS offering as well as self-managed instances. Having the same code base allows customers to adopt the mission-critical DevOps platform in heavily regulated industries such as financial services and healthcare.\n\n### Support for hybrid and multi-cloud environments\n\nEven though many enterprises are moving workloads to the cloud, the reality is this is a multi-cloud world, and many enterprises still maintain infrastructure locally. Oracle has entered into strategic partnerships designed to make it easier for customers to operate in a hybrid and multi-cloud environment. \n\nOracle has partnered with VMware to create the Oracle Cloud VMware solution that allows customers the ability to use their existing tools and processes to manage a VMware environment in OCI. This allows enterprises to accelerate cloud adoption without having to re-architect their applications.\n\nGitLab’s DevOps platform can be deployed on vSphere infrastructure using the GitLab [omnibus install](https://docs.gitlab.com/omnibus/) method. The platform can be installed on-premises or in the cloud. GitLab can be deployed on VMs and the GitLab runners can extend CI capabilities into other cloud environments and [cloud-native hybrid](https://docs.gitlab.com/ee/administration/reference_architectures/#cloud-native-hybrid) deployments.\n\n### GovCloud regions\n\nOCI can provide government customers with the stringent security standards necessary to protect the federal government's data. Oracle has obtained a P-ATO from the Joint Authorization Board for FedRAMP High in its U.S. Government Cloud regions. Varying levels of DISA authorizations are also available but vary by services. 
Find an up-to-date list [here](https://www.oracle.com/industries/government/federal/fedramp/). Meanwhile, GitLab is pursuing a FedRAMP moderate certification and working on activities related to FedRAMP-ready designation. \n\n## Get started with the GitLab DevOps platform and OCI\nOrganizations looking to run GitLab’s DevOps platform on OCI can leverage the supported [Oracle Linux](/install/) package for the platform install. Alternatively, they can leverage the helm chart or GitLab Operator to deploy to Oracle Kubernetes Engine (OKE), which will provide a [cloud-native hybrid approach](https://docs.gitlab.com/ee/administration/reference_architectures/25k_users.html#cloud-native-hybrid-reference-architecture-with-helm-charts-alternative) of the GitLab DevOps platform on OCI.\n\nGitLab’s DevOps platform, delivered as a single application, can run on multiple clouds and has the capability of supporting various official [Linux packages](/install/). Besides Linux packages, GitLab’s platform also supports deployments on Kubernetes using [helm charts](https://docs.gitlab.com/charts/) and Kubernetes [GitLab Operator](https://docs.gitlab.com/operator/). \n\nIf you would like to learn more about the GitLab DevOps platform and OCI, please access the [LiveLabs](https://apexapps.oracle.com/pls/apex/dbpm/r/livelabs/home).\n\n_[Kelkar](https://gitlab.com/vkelkar) is GitLab's Director of Alliances. 
Swank is Distinguished Cloud Architect and Cloud CTO at Oracle._\n",[563,731,9,283],{"slug":1294,"featured":6,"template":734},"gitlab-and-oracle-partner-for-a-cloud-native-approach-to-modern-application-development","content:en-us:blog:gitlab-and-oracle-partner-for-a-cloud-native-approach-to-modern-application-development.yml","Gitlab And Oracle Partner For A Cloud Native Approach To Modern Application Development","en-us/blog/gitlab-and-oracle-partner-for-a-cloud-native-approach-to-modern-application-development.yml","en-us/blog/gitlab-and-oracle-partner-for-a-cloud-native-approach-to-modern-application-development",{"_path":1300,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1301,"content":1306,"config":1310,"_id":1312,"_type":14,"title":1313,"_source":16,"_file":1314,"_stem":1315,"_extension":19},"/en-us/blog/gitlab-and-redhat-automation",{"title":1302,"description":1303,"ogTitle":1302,"ogDescription":1303,"noIndex":6,"ogImage":808,"ogUrl":1304,"ogSiteName":720,"ogType":721,"canonicalUrls":1304,"schema":1305},"GitLab and Red Hat: Automation to enhance secure software development","How our closer relationship with Red Hat will boost deployment automation.","https://about.gitlab.com/blog/gitlab-and-redhat-automation","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab and Red Hat: Automation to enhance secure software development\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vick Kelkar\"}],\n        \"datePublished\": \"2020-04-29\",\n      }",{"title":1302,"description":1303,"authors":1307,"heroImage":808,"date":813,"body":1308,"category":1250,"tags":1309},[1247],"\n\nWe're working towards a closer relationship with Red Hat and we're excited about the possibilities. We think developers can reduce time spent coding while still increasing productivity with technologies from GitLab and Red Hat. 
Here's what you need to know.\n\n### Why GitLab?\n\nGitLab enables both the developers and operations teams to apply [DevOps](/topics/devops/) practices using a single application. Using one tool for the entire application’s lifecycle, i.e. right from development and deployment to operations, allows the organization to achieve operational efficiency and reduce deployment cycle times.\n\nGitLab not only provides source code management ([SCM](/solutions/source-code-management/)) but it also offers CI/CD to make streamlined deployments to a container platform like Red Hat OpenShift while maintaining visibility into the deployment pipelines. Furthermore, with [AutoDevOps](https://docs.gitlab.com/ee/topics/autodevops/), the GitLab application also addresses the organization’s security requirements through scanning and dependency mapping for the developed application. The ability to check the license of software being used, before deploying it in a production environment, helps organizations reduce their [compliance risks](/solutions/compliance/).\n\n### Why GitLab with Red Hat?\n\nRed Hat has a number of technologies in its portfolio. At the core is Red Hat Enterprise Linux ([RHEL](https://www.redhat.com/en/technologies/linux-platforms/enterprise-linux)), an enterprise-grade Linux operating system (OS) platform used by many Fortune 500 companies that can be deployed across the hybrid cloud, from bare-metal and virtual servers to private and public cloud environments. RHEL makes it easier for the operations team to manage the upgrades, security patches and life cycles of servers being used to run applications like GitLab. Red Hat also provides the industry’s most comprehensive enterprise Kubernetes platform in Red Hat OpenShift. 
OpenShift is uniquely positioned to run a containerized application on a public or private cloud.\n\nGitLab can accelerate software development and deployment of applications while RHEL can act as the more secure, fully managed OS that can scale with the application. The inclusion of new DevOps tools in Red Hat’s hybrid cloud technologies like [service mesh](https://www.openshift.com/blog/red-hat-openshift-service-mesh-is-now-available-what-you-should-know) empowers developers to iterate faster on a foundation of trusted enterprise Linux.\n\nThe GitLab solution, which includes [CI/CD workflow](/topics/ci-cd/), an AutoDevOps workflow, a container registry, and Kubernetes integration can be deployed on RHEL using [install](/install/) instructions and you can find out more about GitLab SaaS pricing model [here](/pricing/#gitlab-com). You can read our sales [FAQ](/sales/#faq) or contact our [sales team](/sales/) if you have questions about the offering.\n\nGitLab can be deployed on RHEL-based machines to provide organizations with DevOps infrastructure and collaboration tools. Our collaboration with Red Hat doesn't stop as a supported platform for the GitLab Server but Red Hat OpenShift can also be a target for our CI/CD and Auto DevOps workflows. Application container images can be pushed to our registry and used to deploy applications into Red Hat OpenShift.\n\n### What’s Next?\n\nAs GitLab and Red Hat increase their collaboration, we plan to announce the availability of GitLab Runner Operator for OpenShift in the near future. At GitLab, we have an [engineering epic](https://gitlab.com/groups/gitlab-org/-/epics/2068) underway to develop first-class support for OpenShift.\n\nWith the upcoming product integrations with Red Hat, GitLab is striving to increase collaboration in the organization, increase developer velocity and reduce friction between teams, regardless of the deployment models of VMs or containers. 
The overarching goal is to help organizations improve their [DevSecOps](/solutions/application-security-testing/) posture while significantly reducing security and compliance risks.\n\n### Resources\n\n- [GitOps:The Future of Infrastructure Automation - A panel discussion with Weaveworks, HashiCorp, Red Hat, and GitLab](https://about.gitlab.com/why/gitops-infrastructure-automation/)\n- [RHEL 8 Install documentation](https://about.gitlab.com/install/#centos-8)\n- [and RHEL 7 Install documentation](https://about.gitlab.com/install/#centos-7)\n- [GitLab on Microsoft Azure](https://docs.gitlab.com/ee/install/azure/)\n- [Try OpenShift](https://www.openshift.com/try)\n",[1124,109,9,563,859],{"slug":1311,"featured":6,"template":734},"gitlab-and-redhat-automation","content:en-us:blog:gitlab-and-redhat-automation.yml","Gitlab And Redhat Automation","en-us/blog/gitlab-and-redhat-automation.yml","en-us/blog/gitlab-and-redhat-automation",{"_path":1317,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1318,"content":1324,"config":1329,"_id":1331,"_type":14,"title":1332,"_source":16,"_file":1333,"_stem":1334,"_extension":19},"/en-us/blog/gitlab-and-workloads-on-ibm-z-and-red-hat-openshift",{"title":1319,"description":1320,"ogTitle":1319,"ogDescription":1320,"noIndex":6,"ogImage":1321,"ogUrl":1322,"ogSiteName":720,"ogType":721,"canonicalUrls":1322,"schema":1323},"GitLab enhances DevOps journey on Linux on IBM Z and Red Hat OpenShift","GitLab integrates with IBM Linux on Z and RedHat OpenShift to help app developers deploy to more resilient systems.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681581/Blog/Hero%20Images/gitlab-linux-ibm-z-redhat-openshift.jpg","https://about.gitlab.com/blog/gitlab-and-workloads-on-ibm-z-and-red-hat-openshift","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab enhances DevOps journey on Linux on IBM Z and Red Hat OpenShift\",\n        
\"author\": [{\"@type\":\"Person\",\"name\":\"Vick Kelkar\"}],\n        \"datePublished\": \"2020-09-17\",\n      }",{"title":1319,"description":1320,"authors":1325,"heroImage":1321,"date":1326,"body":1327,"category":1250,"tags":1328},[1247],"2020-09-17","\n\nSeptember 2020 marks 20 years of IBM Linux on Z. If you are using DevOps practices to develop your application on IBM Z, this article is for you. You will learn about how you can leverage GitLab integrations on these resilient systems to enhance your DevOps journey.\n\n## GitLab's journey on Linux on IBM Z and Red Hat OpenShift\n\nRegardless of whether you are using IBM Z or Red Hat OpenShift, revenue-generating applications must be up and available. For example, if a banking application or Point of Sale (POS) application is down for even just five minutes, the company runs the risk of lost revenue during application downtime. This is where high availability (HA) of container platforms like Red Hat OpenShift or hardware stacks like Linux on IBM Z shine. HA strategies such as a horizontal, vertical, consensus, or distributed architectures used by these systems are outside the scope.\n\nSo, how would developers develop and deploy the revenue-generating application to resilient systems mentioned above? How can developers deploy, patch, upgrade, and scale applications in these systems using techniques such as [canary deployments](https://docs.gitlab.com/ee/user/project/canary_deployments.html)? Developers can use GitLab and the [GitLab Runner](https://docs.gitlab.com/runner/) open-source project to run GitLab CI/CD cloud-native pipelines on these resilient systems in the following ways:\n\n* GitLab can be implemented on Linux on Z using logical partitions (LPAR) and virtualization hosts Z/VM. You can learn more about running GitLab on IBM Z using the whitepaper published by the joint GitLab and IBM teams, back in 2017. 
Request a copy of the whitepaper by reaching out to Suchitra Joshi at IBM (suchi@ibm.com).\n\n* GitLab, with its [13.2 release](/releases/2020/07/22/gitlab-13-2-released/), announced [GitLab Runner support for Linux on IBM Z](/releases/2020/07/22/gitlab-13-2-released/#gitlab-runner-support-for-linux-on-ibm-z). The GitLab 13.2 release supports the execution of runners on Linux on Z and has a Docker image of the runner for the platform. Developers can leverage the full GitLab CI stack through the use of SSH executors on Mainframes and can take advantage of public [GitLab CI/CD examples](https://docs.gitlab.com/ee/ci/examples/).\n\n* GitLab and Red Hat teams teamed up to develop the GitLab Runner Operator for Red Hat OpenShift. You can find GitLab Runner Operator in the OpenShift embedded OperatorHub and [Red Hat container image catalog](https://catalog.redhat.com/software/containers/gitlab/gitlab-operator/5ea09928ecb5246c0903b9d5).\n\n## DevOps, cloud native, and containers\n\nCloud computing is becoming more mainstream with [enterprise](/enterprise/) IT because it offers composability, speed, and elasticity to organizations on a global scale. Cloud computing is also ideal for big transformation projects that are trying to modernize infrastructure and software development processes. Along with cloud computing, enterprises are exploring hybrid cloud and [cloud native](/topics/cloud-native/) approaches for developing and deploying their mission-critical workloads. When it comes to cloud-native approaches, [DevOps](/topics/devops/) plays a crucial role as more and more organizations are adopting modern software development methodologies to develop and scale their workloads.\n\nIt's not a hard requirement but cloud native approaches are usually coupled with containers, which are becoming basic unit of deployment. 
Containers allow application developers to package and scale applications using a container orchestrator like [Kubernetes](/solutions/kubernetes/).\n\n## What is GitLab?\n\nGitLab is an open source [DevOps platform](/solutions/devops-platform/) delivered as a single application. The open source project has more than 3,000 contributors and a growing [community](/community/). GitLab fundamentally accelerates the software development lifecycle while addressing important enterprise concerns such as security and compliance. GitLab helps organizations with collaboration, version control, continuous integration (CI), continuous delivery (CD) and [DevSecOps](/solutions/application-security-testing/) workflows. GitLab can integrate with existing tools using custom webhooks as well. Read up on GitLab [features](/pricing/feature-comparison/) to learn how to improve developer productivity.\n\n## Looking forward\n\nGitLab aims to help developers deploy their mission-critical applications to the resilient systems of their choice. As the joint teams increase their collaboration, we plan to announce the availability of GitLab on OpenShift in the future. 
You can follow the progress in the [engineering epic](https://gitlab.com/gitlab-org/gl-openshift).\n\n## Resources\n\n* [GitLab achieves CNCF KCSP status](/blog/gitlab-achieves-kcsp-status/)\n* [GitLab Runner the OpenShift Way](https://www.openshift.com/blog/installing-the-gitlab-runner-the-openshift-way)\n* [Why Linux on Z mainframe?](https://www.ibm.com/it-infrastructure/z/os/linux)\n* [Integrating IBM z/OS platform in CI pipelines with Gitlab](http://www-03.ibm.com/support/techdocs/atsmastr.nsf/WebIndex/WP102827)\n* [GitLab on Red Hat](/partners/technology-partners/redhat/)\n* [Try OpenShift](https://www.openshift.com/try)\n\nCover image by [Matt Howard](https://unsplash.com/@thematthoward) on [Unsplash](https://unsplash.com)\n{: .note}\n",[859,9],{"slug":1330,"featured":6,"template":734},"gitlab-and-workloads-on-ibm-z-and-red-hat-openshift","content:en-us:blog:gitlab-and-workloads-on-ibm-z-and-red-hat-openshift.yml","Gitlab And Workloads On Ibm Z And Red Hat Openshift","en-us/blog/gitlab-and-workloads-on-ibm-z-and-red-hat-openshift.yml","en-us/blog/gitlab-and-workloads-on-ibm-z-and-red-hat-openshift",{"_path":1336,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1337,"content":1342,"config":1347,"_id":1349,"_type":14,"title":1350,"_source":16,"_file":1351,"_stem":1352,"_extension":19},"/en-us/blog/gitlab-ci-cd-with-firebase",{"title":1338,"description":1339,"ogTitle":1338,"ogDescription":1339,"noIndex":6,"ogImage":808,"ogUrl":1340,"ogSiteName":720,"ogType":721,"canonicalUrls":1340,"schema":1341},"How to leverage GitLab CI/CD for Google Firebase","Firebase is a powerful backend-as-a-service tool and when combined with GitLab it can be easy to enable continuous deployment of database, serverless and apps.","https://about.gitlab.com/blog/gitlab-ci-cd-with-firebase","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to leverage GitLab CI/CD for Google Firebase\",\n        
\"author\": [{\"@type\":\"Person\",\"name\":\"Brendan O'Leary\"}],\n        \"datePublished\": \"2020-03-16\"\n      }",{"title":1338,"description":1339,"authors":1343,"heroImage":808,"date":1344,"body":1345,"category":752,"tags":1346},[975],"2020-03-16","\n\nBuilding mobile apps can be painful - especially when it comes to finding a way to provide all the tooling needed to make the application feasible without becoming an expert in many different disciplines. [Firebase from Google](https://firebase.google.com/) aims to take away that burden by providing an app deployment platform and a BaaS or Backend-as-a-Service. While the offerings can vary greatly, most BaaS providers include a database, object storage, push notifications and some sort of hosting package. Firebase goes beyond this and provides user authentication built-in as well as [serverless](/topics/serverless/) functions, telemetry, and Google tools for growth.  \n\nThose tools are very appealing to mobile and even web-app developers and Firebase has been successful in that market with customers including The New York Times, Lyft and Duolingo just to name a few. But even with all of the fantastic BaaS tools Firebase brings to bear on a project, it is critical to have source code management and [CI/CD tools](/topics/ci-cd/) to match. As Firebase configuration for important settings such as database security, serverless functions, and hosting can all be stored “as-code” inside your application’s repository, GitLab paired with Firebase can make for a powerful duo.\n\n## Our app\n\nOur application will be a relatively simple link shortener for use with the domain [labwork.dev](https://labwork.dev). In order to build a link shortener, we’ll need the ability to log users in, a database for storing the links and a way to redirect folks coming with the short links to the longer website. 
Firebase comes with these items packaged together - which should make it relatively painless to stand up (famous last words right?).\n\nI plan on covering the application in more detail in the future, or if you want to jump to the end you can find the [completed project here](https://gitlab.com/brendan-demo/labwork/homepage/). For now, I wanted to at least introduce the architecture plan. I’ll use [Vue.js](https://vuejs.org) for the frontend. Vue.js is a web application that lets users log in using Firebase Authentication. Once logged in, users will have access to a form that allows them to create new short URLs. That form will call a Firebase Function that checks to see if the shortcode requests already exist (or create a random hash if not specified). If the shortcode is unique, the function adds the shortcode and longer URL to the `urls` collection in Firestore and returns okay.  \n\nOnce the shortcode is in the database, I’ll use another cloud function to retrieve the long URL associated with it. Firestore has a great feature that allows you to redirect traffic based on a pattern to a specified function, and I’ll use this so that anything that comes to `/go/{shortcode}` gets magically redirected to the correct long URL.\n\n![Basic Architecture Diagram](https://about.gitlab.com/images/blogimages/firebase_01.png){: .shadow.large.center}\n\n## Add Firebase to the project\n\nOnce we have this architecture finalized, and have built the skeleton of the project and are ready to start deploying and testing, it’s time to add Firebase to our project. Firebase provides a [very helpful CLI tool](https://github.com/firebase/firebase-tools) for getting started here and we’ll use that to begin.\n\nThe first command `firebase init` starts the project initialization process.\n\n![Output of firebase init command](https://about.gitlab.com/images/blogimages/firebase_02.png){: .shadow.large.center}\n\nFrom there, you can select which services you want to use with this project. 
You’ll also be able to decide to create a new Firebase project, or use one you previously created in the [Firebase console](https://console.firebase.google.com/). You also can select where to store the configuration files. I’ll add a folder called `firebase-config` to store all of these files. Now you are able to source control all changes to your Firebase architecture - from indexes to security rules - all in the same repository as your project.\n\n![Firebase config files](https://about.gitlab.com/images/blogimages/firebase_03.png){: .shadow.large.center}\n\nYou can see all of the changes required to add Firebase to the project [in this merge request](https://gitlab.com/brendan-demo/labwork/homepage/-/merge_requests/1).\n\n## Deploy project to Firebase\n\nNow that Firebase is installed in our project folder and configured, we’re ready to deploy for the first time. In order to deploy the Vue.js portion of the project, we first need to build it to production HTML, CSS and Javascript. So before deployment, run the `yarn build` command.  This will output the build to the `dist` folder by default, and I’ve configured Firebase to recognize that directory as the hosting direction in the `firebase.json`.\n\n![Firebase.json example](https://about.gitlab.com/images/blogimages/firebase_04.png){: .shadow.large.center}\n\nOnce the project is built, running a simple `firebase deploy` will deploy ALL of the features of the project to Firebase: the security rules and indexes for Firestore, the Firebase Functions and the Vue.js project to Firebase Hosting.\n\nIf desired, we can also chose to deploy just a particular part of the project with the `--only` flag. 
For example, to only deploy a new version of the functions, we can say \n\n`firebase deploy --only functions`\n\nThis is a feature that we’ll combine with GitLab CI/CD in the next step to make our deployments as efficient as possible.\n\n## Automate deployments with GitLab CI/CD\n\nNow that we have the project deploying, we can automate that deploy process so that we don’t have to be at our computer authenticated to Firebase in order to deploy new changes. The steps to automate the deploy are relatively painless and include: (1) acquire a Firebase API key to use during deployment, (2) setup the `.gitlab-ci.yml` file to install the firebase CLI before running any other steps and (3) issue the deployment commands for each part of the infrastructure depending on the change in a particular commit to the main branch.\n\nFirst, we need an API key so that GitLab CI/CD can authenticate to Firebase and perform the deploy. To get the API key, we can run `firebase login:ci` from the same place we were deploying the application previously. This will provide a key that looks something like `` which we’ll add to GitLab.\n\nWhen you enter `firebase login:ci`, open the URL provided in your browser. That will open a Google authentication page; then log in with your Google account and click `Allow`.  Then return to the terminal and you’ll see the authentication code.\n\n![Output of firebase login:ci command](https://about.gitlab.com/images/blogimages/firebase_05.png){: .shadow.large.center}\n\nOnce you’ve successfully authenticated and obtained the token, go to your project on GitLab and go to Settings -> CI/CD -> Variables. Here’s where we’ll add the token as an environmental variable to be used in our deployment jobs. The key is `FIREBASE_TOKEN` and then the value is the token that was printed to your terminal. 
I’ve made mine both a [protected](https://docs.gitlab.com/ee/ci/variables/#protected-environment-variables) and [masked](https://docs.gitlab.com/ee/ci/variables/#masked-variables) variable. That means the variable will only be exposed to protected branches and if it’s accidentally echoed to the job output, GitLab will hide it from leaking into there.\n\n![Variable configuration screen in GitLab](https://about.gitlab.com/images/blogimages/firebase_06.png){: .shadow.large.center}\n\nNow we can start on the configuration for our `.gitlab-ci.yml`.  At the top of the file I’m going to set the default image to be the current node alpine image from Docker hub:\n\n```yaml\nimage: node:12.13.0-alpine\n```\n\nNext, I’ll create a `before_script` that will install the firebase CLI before running any jobs in the file. In the future, I could bundle that CLI into my own custom Docker image to avoid doing this every time, but for now I’ll go with the boring solution.\n\n```yaml\nbefore_script:\n  - npm i -g firebase-tools\n```\n\nFor the build steps, I want to create a separate job for each part of the infrastructure: Firestore, Functions and the Vue app into Firebase Hosting. To do this, I’m going to utilize the [`only:`](https://docs.gitlab.com/ee/ci/yaml/#only--except) feature to only deploy that part of the infrastructure impacted by changes and that have been merged to master. For example, we’ll only deploy the Firebase Functions when something changes in the `/functions` directory on the `master` branch\n\n```yaml\ndeploy-functions:\n  stage: deploy\n  script:\n    - cd functions\n    - npm install\n    - cd ..\n    - firebase deploy --only functions --token $FIREBASE_TOKEN\n  only:\n    refs:\n      - master\n    changes:\n      - functions/**/*\n```\nWe’ll repeat this same pattern for both Firestore and the Hosting project, adding the `yarn build` step before deploying hosting each time. 
Once that’s completed, every time a merge request is accepted, GitLab CI/CD will automatically deploy the changes into our live production application. You can view the [completed `.gitlab-ci.yml` here](https://gitlab.com/brendan-demo/labwork/homepage/-/blob/master/.gitlab-ci.yml), or check out the link shortener for yourself (and try and [Rick Roll](https://labwork.dev/go/30201a) your friends at [labwork.dev](https://labwork.dev)).\n",[109,9,232],{"slug":1348,"featured":6,"template":734},"gitlab-ci-cd-with-firebase","content:en-us:blog:gitlab-ci-cd-with-firebase.yml","Gitlab Ci Cd With Firebase","en-us/blog/gitlab-ci-cd-with-firebase.yml","en-us/blog/gitlab-ci-cd-with-firebase",{"_path":1354,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1355,"content":1361,"config":1367,"_id":1369,"_type":14,"title":1370,"_source":16,"_file":1371,"_stem":1372,"_extension":19},"/en-us/blog/gitlab-ci-on-google-kubernetes-engine",{"title":1356,"description":1357,"ogTitle":1356,"ogDescription":1357,"noIndex":6,"ogImage":1358,"ogUrl":1359,"ogSiteName":720,"ogType":721,"canonicalUrls":1359,"schema":1360},"GitLab CI/CD on Google Kubernetes Engine in 15 minutes or less","Install GitLab's Runner on GKE in a few simple steps and get started with GitLab CI/CD pipelines.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667003/Blog/Hero%20Images/gke_in_15_cover_2.jpg","https://about.gitlab.com/blog/gitlab-ci-on-google-kubernetes-engine","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab CI/CD on Google Kubernetes Engine in 15 minutes or less\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Elliot Rushton\"}],\n        \"datePublished\": \"2020-03-27\",\n      }",{"title":1356,"description":1357,"authors":1362,"heroImage":1358,"date":1364,"body":1365,"category":752,"tags":1366},[1363],"Elliot Rushton","2020-03-27","If you use [GitLab Self-Managed](/pricing/#self-managed), 
then getting started with GitLab CI using [GitLab's integration with Google Kubernetes Engine (GKE)](/partners/technology-partners/google-cloud-platform/) can be accomplished in a few simple steps. We have several blog posts and documentation that provide detailed [setup instructions for working with Kubernetes clusters](#other-resources). In this post, we highlight the essential steps so that you can get going with GitLab CI/CD in less than 15 minutes.\n\nBy using the GitLab and GKE integration, with one click, you install GitLab Runners on GKE and immediately start running your CI pipelines. Runners are the lightweight agents that execute the CI jobs in your [GitLab CI/CD](/topics/ci-cd/) pipeline.\n\n## Prerequisites:\n\nThe following pre-requisities will need to have been configured in order for you to use the built in GitLab GKE integration:\n- GitLab instance installed and configured with user credentials\n- [Google OAuth2 OmniAuth Provider](https://docs.gitlab.com/ee/integration/google.html) installed and configured on your GitLab instance\n- A Google Cloud project with the following [APIs enabled](https://docs.gitlab.com/ee/integration/google.html#enabling-google-oauth):\n  - Google Kubernetes Engine API\n  - Cloud Resource Manager API\n  - Cloud Billing API\n\n## Get started\n\n![Setup pipeline](https://about.gitlab.com/images/blogimages/ci-gke-in-15/gke_in_15_pipeline.png){: .shadow.medium.center}\n\n### Step 1\n\nWe’re going to add a shared runner at the instance level. 
First, as an administrator, click the “Admin Area” icon\n\n![Runner setup step 1](https://about.gitlab.com/images/blogimages/ci-gke-in-15/ci_gke_in_15_001.png){: .shadow.medium.center}\n\nThen on the left menu, select “Kubernetes”\n\n![Runner setup step 2](https://about.gitlab.com/images/blogimages/ci-gke-in-15/ci_gke_in_15_002.png){: .shadow.medium.center}\n\n### Step 2\n\nClick the green “Add Kubernetes cluster” button.\n\n![Runner setup step 3](https://about.gitlab.com/images/blogimages/ci-gke-in-15/ci_gke_in_15_003.png){: .shadow.medium.center}\n\n### Step 3\n\nThe screen to “Add a Kubernetes cluster integration” should come up. Click on the “Google GKE” icon on the right.\n\n![Runner setup step 4](https://about.gitlab.com/images/blogimages/ci-gke-in-15/ci_gke_in_15_004.png){: .shadow.medium.center}\n\n### Step 4\n\nGive your cluster a name, and select a “Google Cloud Platform project” from your linked GCP account. If no projects are populated in the menu then either your Google OAUTH2 integration isn’t configured correctly or your project is missing the needed permissions. Check that these are set up and that the [APIs mentioned in the prerequisites above](#prerequisites) are enabled.\n\nChoose a zone in which to run your cluster. For the purposes of running CI, the number of nodes in your cluster is going to be how many simultaneous jobs you can run at given time. As we are using the built-in GitLab Google Kubernetes integration, you can set a maximum of four nodes.\nHere we set that to three.\n\nClick “Create Kubernetes Cluster”\n\n![Runner setup step 5](https://about.gitlab.com/images/blogimages/ci-gke-in-15/ci_gke_in_15_005.png){: .shadow.medium.center}\n\nIt takes a few minutes for the cluster to be created. While it’s happening you should see a screen like this. 
You can leave this screen and come back (by going to “Admin Area> Kubernetes > [your cluster name]”)\n\n![Runner setup step 6](https://about.gitlab.com/images/blogimages/ci-gke-in-15/ci_gke_in_15_006.png){: .shadow.medium.center}\n\n### Step 5\n\nOnce the cluster has been created, we need to install two applications. First, install “Helm Tiller” by clicking on the “Install” button next to it.\n\n![Runner setup step 7](https://about.gitlab.com/images/blogimages/ci-gke-in-15/ci_gke_in_15_007.png){: .shadow.medium.center}\n\nThis takes a moment, but should be much quicker than creating the cluster initially was.\n\n![Runner setup step 8](https://about.gitlab.com/images/blogimages/ci-gke-in-15/ci_gke_in_15_008.png){: .shadow.medium.center}\n\n### Step 6\n\nNow that Helm Tiller is installed, more applications can be installed. For this tutorial we only need to install the “GitLab Runner” application. Click the install button next to GitLab Runner.\n\n![Runner setup step 9](https://about.gitlab.com/images/blogimages/ci-gke-in-15/ci_gke_in_15_009.png){: .shadow.medium.center}\n\nAgain, this should go pretty quickly.\n\n![Runner setup step 10](https://about.gitlab.com/images/blogimages/ci-gke-in-15/ci_gke_in_15_010.png){: .shadow.medium.center}\n\nOnce done, the button will change to an “Uninstall” button. You’re now set up with shared runners on your GitLab instance and can run your first CI pipeline!\n\n![Runner setup step 11](https://about.gitlab.com/images/blogimages/ci-gke-in-15/ci_gke_in_15_011.png){: .shadow.medium.center}\n\n### Next steps\n\nNow that you are up and running with GitLab CI/CD on GKE, you can build and run your first GitLab CI/CD pipeline. 
Here are links to a few resources to get you started.\n\n- [Getting Started with GitLab CI/CD](https://docs.gitlab.com/ee/ci/quick_start/)\n- [How to build a CI/CD pipeline in 20 minutes or less](/blog/building-a-cicd-pipeline-in-20-mins/)\n- [Getting started with Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/cloud_deployments/auto_devops_with_gke.html)\n\nIf you are planning to manage your own fleet of GitLab Runners, then you may also be thinking about how best to set up autoscaling of GitLab Runners. As we have just set up your first Runner on GKE, then you can review the [GitLab Runner Kubernetes Executor docs](https://docs.gitlab.com/runner/executors/kubernetes.html) for additional details as to how the GitLab Runner uses Kubernetes to run builds on a Kubernetes cluster.\n\n### Other resources\n\n- [Scalable app deployment webcast](https://about.gitlab.com/webcast/scalable-app-deploy/)\n- [Install GitLab on a cloud native environment](https://docs.gitlab.com/charts/)\n- [Adding and removing Kubernetes clusters](https://docs.gitlab.com/ee/user/project/clusters/add_remove_clusters.html)\n- [Deploy production-ready GitLab on Google Kubernetes Engine](https://cloud.google.com/solutions/deploying-production-ready-gitlab-on-gke)\n\nCover image by [Agê Barros](https://unsplash.com/photos/rBPOfVqROzY) on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[232,859,9,109,1229,1147],{"slug":1368,"featured":6,"template":734},"gitlab-ci-on-google-kubernetes-engine","content:en-us:blog:gitlab-ci-on-google-kubernetes-engine.yml","Gitlab Ci On Google Kubernetes 
Engine","en-us/blog/gitlab-ci-on-google-kubernetes-engine.yml","en-us/blog/gitlab-ci-on-google-kubernetes-engine",{"_path":1374,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1375,"content":1381,"config":1389,"_id":1391,"_type":14,"title":1392,"_source":16,"_file":1393,"_stem":1394,"_extension":19},"/en-us/blog/gitlab-cnh-for-50k-users",{"title":1376,"description":1377,"ogTitle":1376,"ogDescription":1377,"noIndex":6,"ogImage":1378,"ogUrl":1379,"ogSiteName":720,"ogType":721,"canonicalUrls":1379,"schema":1380},"Ready-To-Run GitLab for 50,000 users with AWS Quick Start","If you have two hours, you can deploy a GitLab instance on EKS for any number of users. All it takes is about 14 clicks! Here's what you need to know.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680619/Blog/Hero%20Images/construction-blueprint.jpg","https://about.gitlab.com/blog/gitlab-cnh-for-50k-users","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to provision Ready-To-Run GitLab for 50,000 users with the AWS Quick Start\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"}],\n        \"datePublished\": \"2021-10-06\",\n      }",{"title":1382,"description":1377,"authors":1383,"heroImage":1378,"date":1385,"body":1386,"category":752,"tags":1387},"How to provision Ready-To-Run GitLab for 50,000 users with the AWS Quick Start",[1384],"Darwin Sanoy","2021-10-06","\n\nIf you have spent time reviewing GitLab Reference Architectures, you may have noticed the flexibility of the GitLab codebase; it's possible to support a broad range of implementations from a single box for under one hundred users to horizontal hyper-scaled setups for 50,000 or more.\n\nScaling to massive sizes requires the services within GitLab to be broken out into dedicated compute and storage layers so they can each expand cost effectively based on high loading and an organization's specific usage 
patterns.\n\nThose who provision large scale systems on the cloud generally turn to [Infrastructure as Code (IaC)](/direction/delivery/infrastructure_as_code/) to ensure consistency and to allow easy setup of pre-production environments for the target system. Until recently, GitLab implementers have had to craft this code from scratch.\n\nNow, thanks to our investments in IaC tooling, GitLab customers have an entire implementation ecosystem to work from. These efforts include the [GitLab Environment Toolkit (GET)](/blog/why-we-are-building-the-gitlab-environment-toolkit-to-help-deploy-gitlab-at-scale/) and the AWS Quick Start for cloud native hybrid on EKS.\n\nThis post will focus on the AWS Quick Start - but it's worth noting both initiatives are open source - so you can consume, customize and contribute!\n\n## What is an AWS Quick Start?\n\nAWS Quick Starts are much more than the \"getting started\" feeling implied by their name. As a part of the Quick Start program, AWS ensures that each one reflects the best practices of the software vendor (GitLab in this case) as well as AWS' own well-architected standards. They reflect a high level of technical partnership and technical assurance by both companies. The Quick Start program also includes a hard requirement for high availability of every component of the deployed application. Even bastion hosts are run in an autoscaling group so they will respawn if they unexpectedly terminate. Quick Starts are also intended to create a \"Ready-to-Run\" implementation whenever possible. Quick Starts are open source and have a dependency model which allows GitLab to reuse the existing EKS Quick Start as a foundation.\n\n## What Is the GitLab AWS implementation pattern for cloud native hybrid on EKS?\n\nGitLab has Reference Architectures that determine how to install GitLab for various user counts. 
Each Reference Architecture has a section on cloud native hybrid to show how to configure it and the advised number of vCPUs and memory for the target user count. Each one is similar to blueprints for a building. \n\nThe AWS implementation pattern for cloud native hybrid on EKS builds on this information by:\n\n- Showing how to maximize the usage of AWS PaaS with assurance of GitLab Reference Architecture compliance.\n- Showing a tally of total cluster resources as specified by the Reference Architecture.\n- Presenting a bill of materials listing:\n\n  - EKS node instance type (sizing) and count as tested.\n  - RDS PostgreSQL and Redis Elasticache instance types (sizing) and count as tested.\n  - Gitaly Cluster instance types (sizing) and count as tested.\n  \n- [GPT testing](https://gitlab.com/gitlab-org/quality/performance) results for a system configured according to the bill of materials. This can be used to compare back to the reference architectures and to your own configuration that is based on the bill of materials.\n\nSo while the Reference Architectures are like building blueprints, the AWS implementation pattern for cloud native hybrid on EKS intends to be like a bill of materials (shopping list) you can plug directly into the parameters of the AWS Quick Start or the GitLab Environment Toolkit to build GitLab on EKS with a pre-tested configuration.\n\n## \"Deploy Now\" links\n\nWithin each AWS implementation pattern for cloud native hybrid on EKS you will find some \"Deploy Now\" links.  These make the AWS Quick Start even easier to use by presetting all the instance types and instance counts based on the bill of materials for the user size.  This reduces the number of fields you need to fill out on the Quick Start form. 
The Deploy Now links are how we were able to reduce the number of clicks to deploy for 50,000 users to just 14.\n\nThe Quick Start takes about two hours to deploy regardless of the size of instance you choose.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/s3ZaBXYG8nc\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## How you can deploy GitLab for any number of users in a couple of hours\n\nThe YouTube playlist [Learning to provision the AWS Quick Start for GitLab on EKS](https://youtube.com/playlist?list=PL05JrBw4t0Koi8VBnoVhmj_MstnbJjGw5) walks you through:\n\n1. [GitLab Reference Architectures, performance testing, cloud native hybrid and what is Gitaly](https://www.youtube.com/watch?v=1TYLv2xLkZY&list=PL05JrBw4t0Koi8VBnoVhmj_MstnbJjGw5&index=1&t=399s) (11mins)\n2. [An overview of GitLab AWS implementation patterns](https://www.youtube.com/watch?v=_x3I1aq7fog&list=PL05JrBw4t0Koi8VBnoVhmj_MstnbJjGw5&index=2) (13mins)\n3. [An overview of AWS Quick Start for cloud native hybrid on EKS](https://www.youtube.com/watch?v=XHg6m6fJjRY&list=PL05JrBw4t0Koi8VBnoVhmj_MstnbJjGw5&index=3&t=8s) (9mins)\n4. [Provisioning Ready-To-Run GitLab for 50,000 users in 14 clicks and a long lunch)](https://www.youtube.com/watch?v=s3ZaBXYG8nc&list=PL05JrBw4t0Koi8VBnoVhmj_MstnbJjGw5&index=4&t=798s) (21mins) - same as above video.\n5. 
[Easy performance testing an AWS Quick Start-provisioned GitLab cloud native hybrid instance](https://www.youtube.com/watch?v=QpkF1vXXCjk&list=PL05JrBw4t0Koi8VBnoVhmj_MstnbJjGw5&index=5&t=510s) (32mins)\n\nIf you would like help getting started with Gitlab instance provisioning on AWS, please contact your GitLab account team or reach out to [GitLab Sales](https://about.gitlab.com/sales/)!\n",[9,232,1388],"AWS",{"slug":1390,"featured":6,"template":734},"gitlab-cnh-for-50k-users","content:en-us:blog:gitlab-cnh-for-50k-users.yml","Gitlab Cnh For 50k Users","en-us/blog/gitlab-cnh-for-50k-users.yml","en-us/blog/gitlab-cnh-for-50k-users",{"_path":1396,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1397,"content":1403,"config":1409,"_id":1411,"_type":14,"title":1412,"_source":16,"_file":1413,"_stem":1414,"_extension":19},"/en-us/blog/gitlab-commit-london-speakers",{"title":1398,"description":1399,"ogTitle":1398,"ogDescription":1399,"noIndex":6,"ogImage":1400,"ogUrl":1401,"ogSiteName":720,"ogType":721,"canonicalUrls":1401,"schema":1402},"Get a first look at the lineup for GitLab Commit London!","Meet the speakers – and get the schedule – for the upcoming GitLab Commit conference in London.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678515/Blog/Hero%20Images/gitlab-commit-header.png","https://about.gitlab.com/blog/gitlab-commit-london-speakers","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Get a first look at the lineup for GitLab Commit London!\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2019-08-29\",\n      }",{"title":1398,"description":1399,"authors":1404,"heroImage":1400,"date":1406,"body":1407,"category":300,"tags":1408},[1405],"GitLab","2019-08-29","\n\nWe are (forgive us) _properly chuffed_ to announce the amazing speaker lineup for our upcoming\nuser conference, GitLab Commit, on October 9 in 
London! The London edition of Commit will feature\nan inspiring group of speakers from many of the leading technology organizations in Europe and around the world.\n\nDown a cobblestone drive in the heart of the City of London is [The Brewery](https://www.thebrewery.co.uk/),\nwhere your journey will begin. For one day only, this 18th-century brewery plays host to leaders and\ninnovators who will share their voyages of automation, exploration, and transformation. You'll gain\ninsights to increase velocity and shape the future of development. And then we'll wrap it all up with\na [mini golf party](https://swingers.club/uk) in the moonlight. Jolly good stuff!\n\n [Join us for our first user conference! Register now.](#register-today)\n {: .alert .alert-gitlab-purple .text-center}\n\n## The lineup\n\nLocation coolness aside, the stories and talks are why you should come to GitLab Commit. We've\nworked hard to create a user-centric conference with speakers who not only build great software every day\nbut strive to modernize the process as they do so.\n\nThe GitLab Commit London keynotes will include:\n\n![speaker card alberto gisbert](https://about.gitlab.com/images/blogimages/gitlab-commit-london-speakers/gitlab-commit-london-alberto-gisbert-speaker-card.png){: .shadow.small.right.wrap-text}\n\n#### Establishing a DevOps culture @ Porsche:  A GitLab success story\n##### Alberto Gisbert & Dennis Menge, software engineers at Porsche AG\n\nIn early 2018, Porsche set a goal to become a digital company. However, neither the culture\nnor infrastructure supported modern software development. Automation, cloud infrastructure, and\nbetter collaboration were needed. 
Software engineers Alberto Gisbert and Dennis Menge share\nPorsche's journey to digital transformation from a developer point of view.\n\n![speaker card matt smith](https://about.gitlab.com/images/blogimages/gitlab-commit-london-speakers/gitlab-commit-london-matt-smith-speaker-card.png){: .shadow.small.left.wrap-text}\n\n#### Zero to K8s: As fast as possible\n##### Matt Smith, software engineer lead, OSCE Team, Capgemini UK\n\nMatt will share his success story starting with no code and no infrastructure to having a\nfunctioning proof of concept application with frontend, backend and database, all\nrunning on Kubernetes. He'll explain how he got there quickly using GitLab and open source\nscripts and tools.\n\n![speaker card alexandru viscreanu](https://about.gitlab.com/images/blogimages/gitlab-commit-london-speakers/gitlab-commit-london-alexandru-constantin-viscreanu-speaker-card.png){: .shadow.small.right.wrap-text}\n\n#### Flying from base to native within the clouds\n##### Alexandru Constantin Viscreanu, platform engineer, Kiwi.com\n\nToday's PaaS landscape is filled with a wide variety of choices so Kiwi.com is constantly working on improving\nits infrastructure stack to take advantage of the many building blocks cloud providers have to offer. In\nthis talk Alex will be sharing how Kiwi manages to keep the technology up to date and what\nlessons have been learned from migrating their services over the years.\n\n## GitLab Commit London tracks\n\n### Cloud native\n\nLearn how to leverage cloud native technologies like Kubernetes, serverless, and [multicloud](/topics/multicloud/) to build\nand run applications better and faster. 
In \"The beauty of Gitlab CI/CD,\" with Getty Orawo, a software\ndeveloper at Podii, Getty will discuss her journey through DevOps and how she learned to autodeploy.\n\n### DevOps in action\n\nWe all need practical advice and examples of how others have navigated DevOps transformations so\nthese sessions focus on actions and results rather than high-level ideas. In \"Zero-cost infrastructure\nand automatic deployments for small teams,\" with George Tsiolis, UX engineer, and Niki Kontoe,\nbackend engineer, at Ubitech, the speakers will discuss how they planned their engineering\nefforts and improved their development workflows using source code management, code reviews,\ncontinuous integration, and continuous deployment practices.\n\n### Powered by GitLab\n\nSoftware has eaten the world and we are all better for it. Hear how developers like you are\nbuilding on GitLab to solve large, real-world problems including humanitarian crises and even space exploration.\nIn \"Four years with GitLab at Ocado Technology,\" with Piotr Kurpik and Artur Frysiak,\nDevOps engineers at Ocado, the co-presenters will focus on how they honed in on one and only source\ncode tool and the successes and challenges that have shaped their experiences.\n\n## Agenda\n\n\u003Ca id=\"sched-embed\" href=\"//gitlabcommit2019london.sched.com/\">View the GitLab Commit 2019 - London schedule &amp; directory.\u003C/a>\u003Cscript type=\"text/javascript\" src=\"//gitlabcommit2019london.sched.com/js/embed.js\">\u003C/script>\n\n### Register today\n\nDon't wait to register as tickets are going fast! 
You can still get £99 off if you register before\nSeptember 23 with code [commit99](https://gitlabcommit2019ldntickets.eventbrite.com?discount=commit99).\n",[278,268,9],{"slug":1410,"featured":6,"template":734},"gitlab-commit-london-speakers","content:en-us:blog:gitlab-commit-london-speakers.yml","Gitlab Commit London Speakers","en-us/blog/gitlab-commit-london-speakers.yml","en-us/blog/gitlab-commit-london-speakers",{"_path":1416,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1417,"content":1423,"config":1431,"_id":1433,"_type":14,"title":1434,"_source":16,"_file":1435,"_stem":1436,"_extension":19},"/en-us/blog/gitlab-gdk-remote-development",{"title":1418,"description":1419,"ogTitle":1418,"ogDescription":1419,"noIndex":6,"ogImage":1420,"ogUrl":1421,"ogSiteName":720,"ogType":721,"canonicalUrls":1421,"schema":1422},"Contributor how-to: Remote Development workspaces and GitLab Developer Kit","This tutorial helps you get GDK working inside Remote Development workspaces to begin contributing to GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670563/Blog/Hero%20Images/cloudcomputing.jpg","https://about.gitlab.com/blog/gitlab-gdk-remote-development","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Contributor how-to: Remote Development workspaces and GitLab Developer Kit\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Raimund Hook\"}],\n        \"datePublished\": \"2023-07-31\"\n      }",{"title":1418,"description":1419,"authors":1424,"heroImage":1420,"date":1426,"body":1427,"category":752,"tags":1428},[1425],"Raimund Hook","2023-07-31","Open source is fundamental to GitLab. 
We believe that [everyone can\ncontribute](https://handbook.gitlab.com/handbook/company/mission/#mission).\n\nTypically, we recommend that anyone contributing anything more than basic\nchanges to GitLab run the [GitLab Development\nKit](https://gitlab.com/gitlab-org/gitlab-development-kit) (GDK). Because\ncontributors can't always meet the GDK's resource demands, we're working to\nenable GDK inside the cloud-based GitLab Remote Development workspaces.\n\n\nIn this article, I'll explain how I used a Remote Development workspace\nrunning in my Kubernetes cluster to make working with the GDK faster and\neasier.\n\n\n## A preliminary note\n\nFirst, keep in mind that as of this writing the [Remote Development\nworkspaces](https://about.gitlab.com/direction/create/ide/remote_development/)\nfeature is still in Beta. My example here is therefore very much a proof of\nconcept — and as such, it has some rough edges.\n\n\nBefore getting started, I followed the \"[Set up a\nworkspace](https://docs.gitlab.com/ee/user/workspace/#set-up-a-workspace)\"\nprerequisites guide in the GitLab docs. For a more detailed set of\ninstructions, see Senior Developer Evangelist Michael Friedrich's tutorial\non [how to set up infrastructure for cloud development\nenvironments](https://about.gitlab.com/blog/set-up-infrastructure-for-cloud-development-environments/).\n\n\n## Getting started with workspaces\n\nTo start using workspaces, you will need a project configured with a\n`.devfile.yaml`. 
GitLab team members have curated [a number of example\nprojects](https://gitlab.com/gitlab-org/remote-development/examples) you can\nreview.\n\n\nInitially, I tried to do this with a fork of the GitLab project itself, but\nI ran into [some\nissues](https://gitlab.com/gitlab-org/gitlab/-/issues/414011) when the\nworkspace begins cloning the repository.\n\n\nTo figure out what was causing my problems, I looked more closely at what\nhappens behind the scenes when a workspace is created.\n\n\n## Behind the scenes with Remote Development workspaces\n\nWhen you create a new workspace, the following happens:\n\n1. The GitLab agent for Kubernetes creates a new namespace in your cluster.\nThe agent dynamically generates a name for and assumes management of the\nnamespace.\n\n1. Inside the namespace, a new deployment is created, specifying the\ncontainer you chose in your `.devfile.yaml` as the image to use.\n\n1. This deployment is configured with some [init\ncontainers](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/)\nthat perform some actions:\n    1. Cloning the repository into `/project/${project_path}`.\n    1. Injecting the VS Code server binary into your container.\n1. Once those init containers are complete, your container starts and the\nworkspace becomes available.\n\n\n## The clone problem\n\nWhen cloning a repository, `git` tends to do much of the work in memory.\nThis can be a challenge on larger projects/repositories, as it can require\nsignificant amounts of RAM. When cloning the GitLab project, for instance,\ngit consumes approximately 1.6GB of RAM. This number is only going to\nincrease with time. 
Sure, strategies like [shallow\nclones](https://git-scm.com/docs/git-clone#Documentation/git-clone.txt---depthltdepthgt)\ncan help reduce this, but these are perhaps less suited to active use by a\ndeveloper as they can increase the amount of time required to perform\nongoing git operations.\n\n\nIn fact, creating a workspace using our `.devfile.yaml` in a fork of the\nGitLab project failed for this reason. The init container performing the\nclone is currently hard-limited to 128MiB of RAM, after which the memory\nmanagement processes on the node kill the container.\n\n\nTo overcome this limitation, move the `.devfile.yaml` into the a fork of the\nroot of the GDK repository. This project clones more quickly (and does so\nusing fewer resources), so it's a  perfect starting point for running GDK\nitself. Another (bonus) advantage: You're then primed to contribute to the\nGDK itself, in addition to any of the other GitLab projects that the GDK\nclones.\n\n\n## Components of a GDK installation\n\nGDK clones the following projects from the GitLab 'family':\n\n* [GitLab](https://gitlab.com/gitlab-org/gitlab)\n\n* [Gitaly](https://gitlab.com/gitlab-org/gitaly)\n\n* [GitLab shell](https://gitlab.com/gitlab-org/gitlab-shell)\n\n\nThis allows you to work on any items in those directories as a part of your\n\"live\" installation.\n\n\n## Getting GDK installed and running in a workspace\n\nOnce I had a workspace up and running, my next step was to get GDK installed\nand running *in* that workspace. The GDK's documentation presents [several\nroutes for doing\nthis](https://gitlab.com/gitlab-org/gitlab-development-kit/#installation).\n\n\nA complete installation can take some time, as GDK needs to bootstrap itself\nand install a number of prerequisites. 
This is less than ideal in the\ncontext of a Remote Development workspace, as one of remote development's\nprimary benefits is enabling access to a development environment rapidly.\nRequiring a user to bootstrap an environment that takes 50 minutes (or\nlonger) doesn't help achieve this goal.\n\n\nTo combat this, I built a container image that effectively bootstraps and\ninstalls GDK, pre-building the GDK prerequisites and pre-seeding the\ndatabase. This image and its associated tooling are currently [in\nreview](https://gitlab.com/gitlab-org/gitlab-development-kit/-/merge_requests/3231).\n\n\n## Pre-building\n\nPre-building the container and running the bootstrap process on a scheduled\nbasis allows us to perform that process once, without requiring the user to\nwait for something that can essentially be \"pre-canned\" for their use.\n\n\nOnce the workspace is running, we still need to \"reinstall\" the GDK\nenvironment with the latest version of our GitLab repository, but this step\ndoesn't take quite as long as a complete bootstrap.\n\n\n## Generating a gdk.yml file\n\nTo work properly, GDK also requires a [`gdk.yml`\nfile](https://gitlab.com/gitlab-org/gitlab-development-kit/-/blob/main/doc/configuration.md#gdkyml).\nThis file tells GDK how to configure GitLab to return the correct URLs and\nother items. To get GDK running in Remote Development, Rails needs to return\nURLs in a certain scheme (otherwise your browser won't know where to\nconnect). To help this along, we [inject an environment\nvariable](https://gitlab.com/gitlab-org/gitlab/-/issues/415328) into the\nworkspace container. 
This variable helps us determine the URL in use (which\nis dynamically generated for each workspace).\n\n\nWe [now have a\nscript](https://gitlab.com/gitlab-org/gitlab-development-kit/-/blob/main/support/gitlab-remote-dev/remote-dev-gdk-bootstrap.sh?ref_type=heads)\nin GDK that will generate your `gdk.yml` file based on your workspace.\n\n\n## Creating our devfile\n\nThe contents of my `.devfile.yaml` looks like this:\n\n\n```yaml\n\nschemaVersion: 2.2.0\n\ncomponents:\n  - name: tooling-container\n    attributes:\n      gl/inject-editor: true\n    container:\n      # NB! This image is only in use until https://gitlab.com/gitlab-org/gitlab-development-kit/-/merge_requests/3231 is merged!\n      image: registry.gitlab.com/gitlab-org/gitlab-development-kit/gitlab-remote-workspace:stingrayza-gdk-remote-dev-add-container\n      memoryRequest: 10240M\n      memoryLimit: 16384M\n      cpuRequest: 2000m\n      cpuLimit: 6000m\n      endpoints:\n        - name: ssh-2222\n          targetPort: 2222\n        - name: gdk-3000\n          targetPort: 3000\n        - name: docs-3005\n          targetPort: 3005\n        - name: pages-3010\n          targetPort: 3010\n        - name: webpack-3808\n          targetPort: 3808\n        - name: devops-5000\n          targetPort: 5000\n        - name: jaeger-5778\n          targetPort: 5778\n        - name: objects-9000\n          targetPort: 9000\n        - name: shell-9122\n          targetPort: 9122\n```\n\n\nThis definition comes straight out of the [Workspace\ndocs](https://docs.gitlab.com/ee/user/workspace/#devfile), and opens a\nnumber of ports that GDK uses. 
(For now, I've only tested the port\n`gdk-3000`, which is the the link to our instance of GDK.)\n\n\n## From Workspace to GDK\n\nOnce we have a project with a `.devfile.yaml`, our final step is to [create\na new\nworkspace](https://docs.gitlab.com/ee/user/workspace/#create-a-workspace).\n\n\nAs a part of this step, your cluster will pull the image as defined in the\n`.devfile.yaml` and start it up. For the GDK image we pre-built, this can\ntake a few minutes.\n\n\nOnce the workspace is ready, the last step is to follow the link from the UI\nto connect to the workspace. This will open up a familiar VS Code IDE, with\nour GDK fork checked out.\n\n\nBut wait, where's GDK?\n\n\nWell, the pre-build did most of the work for us, but we still need to take a\nfew final steps before we can claim that GDK is up and running. These have\nbeen built into a script we can run from the integrated terminal within the\nworkspace.\n\n\nTo open a terminal, we can click on the VS Code Hamburger menu (top left),\nnavigate to `Terminal` and select `New Terminal`.\n\n\nNow we execute the following script, which completes the setup and copies a\ncouple of files over from the pre-built folders:\n\n\n```shell\n\nsupport/gitlab-remote-dev/remote-dev-gdk-bootstrap.sh\n\n```\n\n\nThis can take up to 15 minutes, but when it's done it should output the\nmagic words — something like the following (note the 3000 in the URL; we\nspecified that in the `.devfile.yaml` earlier):\n\n\n```shell\n\nSuccess! You can access your GDK here:\nhttps://3000-workspace-62637-2083197-apglwp.workspace.my-workspace.example.net/\n\n```\n\n\n## Connect to your GDK\n\nFollow the link as displayed using Cmd-click or Ctrl-click. After a couple\nof moments (GDK boot time), you should reach a familiar GitLab login screen.\n\n\nCongratulations! 
GDK is now running inside your Remote Development\nworkspace.\n\n\nTo log in, type `gdk` in your terminal and you'll see the default admin\ncredentials displayed near the bottom:\n\n\n```shell\n\n# Development admin account: xxxx / xxxx\n\n\nFor more information about GitLab development see\n\nhttps://docs.gitlab.com/ee/development/index.html.\n\n```\n\n\nLog into your GDK with the default credentials, change the admin user\npassword, and you're all set!\n\n\n## Demo of workspace launch\n\nHere's a demo of launching a workspace in my personal cluster:\n\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/iXq1NnTjnX0\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n\n## How to contribute to GitLab\n\nIn this article I explained how to get GDK up and running in Remote\nDevelopment workspaces. This is not without its challenges, but the end\nresult should mean that contributing to GitLab (especially in\nresource-constrained environments) is quicker and easier.\n\n\nDo you want to contribute to GitLab? Come and join in the conversation in\nthe `#contribute` channel on GitLab's [Discord](https://discord.gg/gitlab),\nor just pop in and say \"hello.\"\n\n\n_Disclaimer: This blog contains information related to upcoming products,\nfeatures, and functionality. It is important to note that the information in\nthis blog post is for informational purposes only. Please do not rely on\nthis information for purchasing or planning purposes. As with all projects,\nthe items mentioned in this blog and linked pages are subject to change or\ndelay. 
The development, release, and timing of any products, features, or\nfunctionality remain at the sole discretion of GitLab._\n",[1146,563,1429,9,1430,731],"workflow","contributors",{"slug":1432,"featured":6,"template":734},"gitlab-gdk-remote-development","content:en-us:blog:gitlab-gdk-remote-development.yml","Gitlab Gdk Remote Development","en-us/blog/gitlab-gdk-remote-development.yml","en-us/blog/gitlab-gdk-remote-development",{"_path":1438,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1439,"content":1445,"config":1454,"_id":1456,"_type":14,"title":1457,"_source":16,"_file":1458,"_stem":1459,"_extension":19},"/en-us/blog/gitlab-hashicorp-terraform-vault-pt-1",{"title":1440,"description":1441,"ogTitle":1440,"ogDescription":1441,"noIndex":6,"ogImage":1442,"ogUrl":1443,"ogSiteName":720,"ogType":721,"canonicalUrls":1443,"schema":1444},"GitLab and HashiCorp streamline delivery workflows","Discover how to leverage CI/CD for your infrastructure scripts with Terraform and GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670238/Blog/Hero%20Images/gitlab-terraform-pipelines.jpg","https://about.gitlab.com/blog/gitlab-hashicorp-terraform-vault-pt-1","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab and HashiCorp: Providing application and infrastructure delivery workflows\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Kelly Hair\"},{\"@type\":\"Person\",\"name\":\"Anthony Davanzo\"}],\n        \"datePublished\": \"2019-09-17\",\n      }",{"title":1446,"description":1441,"authors":1447,"heroImage":1442,"date":1450,"body":1451,"category":815,"tags":1452},"GitLab and HashiCorp: Providing application and infrastructure delivery workflows",[1448,1449],"Kelly Hair","Anthony Davanzo","2019-09-17","\nA growing number of teams are becoming more and more invested in continually improving the business through iterative development. 
Adopting the culture of DevOps isn’t necessarily confined to software development itself, but is equally applicable to ITOps, System Admins, and other infrastructure teams as well. Just as a proper CI/CD workflow is the foundation of today’s application delivery, a similar automated workflow is essential for managing the delivery of infrastructure as well.\n\nAs developers try to become more agile in building, packing, and testing their applications, having the right CI/CD tool that is flexible to other automation use cases is critical. GitLab has gone into great detail about their [flexible CI/CD capabilities here](https://docs.gitlab.com/ee/ci/introduction/index.html#how-gitlab-cicd-works). What’s sometimes overlooked is implementing the proper CI/CD process for the underlying infrastructure that these applications rely on. In addition to application delivery, organizations need to consider what their infrastructure delivery process looks like. GitLab and HashiCorp have partnered to create a multi-blog series on how to combine the application delivery workflow with the infrastructure delivery workflow. In this part we will discuss a high-level overview of the solutions that we will dive deeper into in Part 2.\n\n## Leveraging HashiCorp Terraform for CI/CD Pipelines\n\n[HashiCorp Terraform](https://www.terraform.io/) is an open source tool for provisioning infrastructure as code. Users define infrastructure in HashiCorp Configuration Language (HCL) configuration files, Terraform reads those configurations, offers a speculative plan of what it will create, and then users confirm and apply those changes. 
Terraform keeps track of what infrastructure is provisioned in a state file.\n\nThe recently announced Terraform Cloud application provides users with additional automation and collaboration capabilities on top of Terraform, such as remotely managing and version that state file, executing Terraform runs (plan/apply) remotely, and allowing teams to comment and collaborate on Terraform. By remotely managing state files, Terraform Cloud empowers teams to work more quickly and safely in parallel without concerns of losing the file or overwriting each other's changes. These features are especially helpful for users implementing CI/CD pipelines because they allow users to interact with Terraform via webhooks/API instead of having Terraform run on a local machine.\n\nMost users will store their configuration files in a VCS (Version Control System) like GitLab and connect that VCS to Terraform Cloud. That connection allows users to borrow best practices from software engineering to version and iterate on infrastructure as code, using VCS and Terraform Cloud as a provisioning pipeline for infrastructure. Terraform will automatically run a plan upon changes to configuration files in a VCS. This plan can be reviewed by the team for safety and accuracy in the Terraform UI, then it can be applied to provision the specified infrastructure. Terraform Cloud can also be configured to automatically apply those changes.\n\nTerraform Cloud also includes a Governance upgrade, which provides access to the [Sentinel](https://www.hashicorp.com/sentinel) policy as code framework.  This framework allows users to define fine-grain rules and policies for their infrastructure that are automatically enforced before that infrastructure is provisioned. 
This allows users to work with the speed and efficiency they want in their continuous integration/delivery pipelines, while still ensuring that best practices are being implemented.\n\n### Future iterations\n\nIt is also worth discussing current work in progress with GitLab and Vault. Vault from Hashicorp secures, stores, and tightly controls access to tokens, passwords, certificates, API keys, and other secrets that services depend on. In efforts to improve [Variables and secrets management in GitLab CI/CD](https://gitlab.com/groups/gitlab-org/-/epics/816) we’re working with HashiCorp to provide a [first-class integration with Vault](https://gitlab.com/gitlab-org/gitlab-ce/issues/61053) sometime in the future.\n\n## Next steps\n\nAs a follow up, we will soon be posting a blog on the technical details of _how_ to build a Terraform pipeline in GitLab CI/CD.\n\nIn meantime, check out how [WagLabs reduced their release process from 40 minutes to just six](/blog/wag-labs-blog-post/), using Terraform and GitLab CI/CD!\n\n### About the authors\n\n_[Anthony Davanzo](https://www.linkedin.com/in/anthonydavanzo/) is the product marketing manager for Terraform Cloud at HashiCorp. In this role he focuses on bringing Terraform Cloud to market, hoping to drive adoption and spread awareness of the tool. 
His prior role as the technical product marketing manager for Terraform helps with deep domain knowledge and before HashiCorp, he was a product marketing manager at Cloudflare._\n\n_[Kelly Hair](/company/team/#khair1) is a solutions architect at GitLab._\n\nPhoto by [Saad Salim](https://unsplash.com/@saadx?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[109,9,563,232,1453],"frontend",{"slug":1455,"featured":6,"template":734},"gitlab-hashicorp-terraform-vault-pt-1","content:en-us:blog:gitlab-hashicorp-terraform-vault-pt-1.yml","Gitlab Hashicorp Terraform Vault Pt 1","en-us/blog/gitlab-hashicorp-terraform-vault-pt-1.yml","en-us/blog/gitlab-hashicorp-terraform-vault-pt-1",{"_path":1461,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1462,"content":1468,"config":1477,"_id":1479,"_type":14,"title":1480,"_source":16,"_file":1481,"_stem":1482,"_extension":19},"/en-us/blog/gitlab-is-an-sca-contender",{"title":1463,"description":1464,"ogTitle":1463,"ogDescription":1464,"noIndex":6,"ogImage":1465,"ogUrl":1466,"ogSiteName":720,"ogType":721,"canonicalUrls":1466,"schema":1467},"Forrester names GitLab challenger in software composition","GitLab has been recognized by analysts as a challenger in Software Composition Analysis.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669950/Blog/Hero%20Images/security-cameras.jpg","https://about.gitlab.com/blog/gitlab-is-an-sca-contender","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab is named a Challenger in The Forrester Wave™: Software Composition Analysis, Q2 2019\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cindy Blake\"}],\n        \"datePublished\": \"2019-04-12\",\n      
}",{"title":1469,"description":1464,"authors":1470,"heroImage":1465,"date":1472,"body":1473,"category":300,"tags":1474},"GitLab is named a Challenger in The Forrester Wave™: Software Composition Analysis, Q2 2019",[1471],"Cindy Blake","2019-04-12","\n\nWhile GitLab is best known in the traditional DevOps space, we have also begun to grow out our expertise in application security, which may come as a [surprise to security professionals](https://www.linkedin.com/pulse/ciso-cheat-sheet-git-cindy-blake-cissp), who may not have encountered us previously. We may have started out focused on traditional developer tools, however, as GitLab has added capabilities to cover the entire Software Development\nLifecycle (SDLC), this now includes not only a market-leading [Continuous Integration](/blog/gitlab-leader-continuous-integration-forrester-wave/)\nsolution but also, more recently, integrated [application security testing built into the CI/CD pipeline](/solutions/application-security-testing/).\nOur single, end-to-end application enables security testing that is tightly aligned to today’s\nrapid, [iterative cycles of DevOps](/solutions/application-security-testing/) development and the modern\ninfrastructure that accompanies cloud native applications.\n\n## Who was included?\n\nFor The Forrester Wave™: Software Composition Analysis, Q2 2019, participating vendors were required to\nhave most of the following capabilities out of the box:\n- Ability to provide remediation advice on both open source license risk and vulnerabilities;\n- Ability to integrate into SDLC automation tools;\n- Ability to provide proactive vulnerability management;\n- Ability to edit and create policies; and\n- Ability to visually report on open source risk.\n\nParticipating vendors were also required to have more than $10M in revenue and have\ninterest from Forrester clients or relevance to them.\n\n## GitLab is a new challenger\n\nHaving only added security capabilities in December 2017, GitLab has 
been excluded from\nother analyst application security reports that only look at more established players.\nIn our first official security-oriented analyst evaluation, we are excited not only to get the\nword out about GitLab’s security capabilities, but also to have this opportunity for analyst\nfeedback and insight into how GitLab compares. We take to heart not only areas where we\nshine – but also where improvement is needed. With GitLab,\n“[everyone can contribute](/community/contribute/),” and the feedback gained from\nForrester is another valuable contribution. We also welcome [your participation](/community/contribute/) and invite you to help us\nunderstand what you would like to see as our security capabilities grow.\n\nBased on this analyst report and analyst interaction feedback, we are already addressing improvement opportunities in our\n[roadmap](/direction/secure/#upcoming-releases) and [vision](/direction/secure/#direction).\n\n**Check out our [complete SCA response](/blog/gitlab-is-an-sca-contender/) for links to specific updates and response comments.**\n\nAs a company dedicated to releasing incrementally, delivering first on breadth and then\non depth, it is not uncommon for GitLab to initially place in more of a challenger position,\nas our feature set generally does not have the same maturity as established players in the space.\nHowever, when GitLab enters a space, we do so boldly, with clear intentions and a solid strategy.\nGitLab’s strategy for application security testing and software composition analysis focuses\nmore equally on both the developer and the security professional than traditional solutions.\nYou will find some areas in strategy where we were not scored as highly as we believe we\nshould be, due to our more aggressive focus on development.\n\n## Updates since the evaluation\n\nGitLab has shipped a [major new release every month](/releases/categories/releases/)\nfor 90 consecutive months. 
Forrester evaluated GitLab 11.6 for this report while versions\n[11.7](/releases/2019/01/22/gitlab-11-7-released/), [11.8](/releases/2019/02/22/gitlab-11-8-released/), and\n[11.9](/releases/2019/03/22/gitlab-11-9-released/) have since been released. You will find several features\nthat Forrester felt were lacking have already been added, including improvements to the\nsecurity dashboard, additional languages added to SAST scanning, and secrets detection.\nWhen using Forrester’s scoring tool, be sure to adjust the criteria for our current capabilities.\nA list of what’s been added since Forrester’s evaluation can be found on our [complete SCA response](/blog/gitlab-is-an-sca-contender/).\n\n## Forrester’s key takeaway: “Remediation, policy management, and reporting are key differentiators”\n\nForrester says, “As developers continue to use open source to accelerate the release of new\napplication functionality, remediation, policy management, and reporting will dictate which\nproviders will lead the pack. Vendors that can provide developers with remediation advice\nand even create patches position themselves to significantly reduce business risk.”\n\nThis takeaway is closely aligned with GitLab's [vision for application security testing](/direction/secure/#direction)\nand our work in progress for [auto remediation](https://gitlab.com/groups/gitlab-org/-/epics/133). While not available in the evaluated version (11.6), today’s GA release, (11.9), [can detect a more current patch available](/releases/2019/03/22/gitlab-11-9-released/#vulnerability-remediation-merge-request) and\nenable the developer to create a [new branch and apply the patch](https://docs.gitlab.com/ee/user/application_security/security_dashboard/#create-a-merge-request-from-a-vulnerability)\nwith one click. 
Upcoming versions will [automatically run the pipeline and present the results](https://gitlab.com/groups/gitlab-org/-/epics/275) to the developer to accept or reject.\nBy automating remediations that are readily apparent, developers and security can focus on\nvulnerabilities whose remediation is not as straightforward.\n\nThe fact that GitLab is a [single application](/) for the entire SDLC enables us to take\nremediation even further – actually running the pipeline in a separate branch,\neven [measuring the performance impact](https://gitlab.com/gitlab-org/gitlab-ee/issues/9382)\nof the patch. We isolate the cause and effect: the developer makes a code change, that code is\ntested and they see the results before merging the code with others’. It also allows us to do [Dynamic scanning](https://docs.gitlab.com/ee/user/application_security/dast/) in the same manner, before the\ncode is merged with anyone else’s. We do this by spinning up a\n[review app](https://docs.gitlab.com/ee/ci/review_apps/) in the pipeline report.\nThis fully functioning app reflects the developer’s code changes and can be used for user testing,\nperformance testing, and dynamic app security scanning.\n\n## GitLab's advice\n\nWe believe GitLab is ideal for enterprises who are:\n\n* Using GitLab for CI/CD.\n* Practicing iterative development via DevOps.\n* Using containers and serverless.\n\nFor the enterprise that has not invested in app sec tools, GitLab can quickly provide\nscanning, often necessary for regulatory compliance, with a single application.\nGitLab offers SAST, DAST, Dependency, Container Scanning, and License Management [with one app](/solutions/application-security-testing/) – no need to evaluate and buy from multiple vendors, then stitch together integration with the DevOps toolchain. 
In fact, GitLab customer, [Glympse Inc.](https://glympse.com/),\nstood up 40 repos with automated security testing, using all of the GitLab scans, in less time\nthan they could have installed just the individual tools – and as a bonus, they impressed their\nauditors with their process.\n\nFor the enterprise already deeply invested in traditional app sec tools, GitLab affords a\nbroader and [earlier scanning effort](/solutions/application-security-testing/), using a tool that\ndevelopers are already using. GitLab can scan every code change, much the way that\nevery airplane passenger gets scanned through security. Save the deeper scans for\nlater and/or less frequent evaluation by the security team. Consider using GitLab on select\nprojects to experience the more efficient workflow and potentially reduce your scanning costs from costlier tools.\n\n## Our response\n\n We invite you to see our [complete response](/blog/gitlab-is-an-sca-contender/), and as always, welcome\n [your contributions](/community/contribute/)!\n\n Cover image by [Scott Webb](https://unsplash.com/@scottwebb) on [Unsplash](https://unsplash.com/photos/yekGLpc3vro)\n{: .note}\n",[9,1475,731,1250,979,1476],"inside GitLab","testing",{"slug":1478,"featured":6,"template":734},"gitlab-is-an-sca-contender","content:en-us:blog:gitlab-is-an-sca-contender.yml","Gitlab Is An Sca Contender","en-us/blog/gitlab-is-an-sca-contender.yml","en-us/blog/gitlab-is-an-sca-contender",{"_path":1484,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1485,"content":1491,"config":1497,"_id":1499,"_type":14,"title":1500,"_source":16,"_file":1501,"_stem":1502,"_extension":19},"/en-us/blog/gitlab-jira-integration-selfmanaged",{"title":1486,"description":1487,"ogTitle":1486,"ogDescription":1487,"noIndex":6,"ogImage":1488,"ogUrl":1489,"ogSiteName":720,"ogType":721,"canonicalUrls":1489,"schema":1490},"How to achieve a GitLab Jira integration","Check out how to integrate GitLab self-managed with Atlassian Jira to connect your 
merge requests, branches, and commits to a Jira issue.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667260/Blog/Hero%20Images/twopeasinapod.jpg","https://about.gitlab.com/blog/gitlab-jira-integration-selfmanaged","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to achieve a GitLab Jira integration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tye Davis\"}],\n        \"datePublished\": \"2021-04-12\",\n      }",{"title":1486,"description":1487,"authors":1492,"heroImage":1488,"date":1494,"body":1495,"category":752,"tags":1496},[1493],"Tye Davis","2021-04-12","\n_This is the second in a series of posts on GitLab Jira integration strategies. The [first post](/blog/integrating-gitlab-com-with-atlassian-jira-cloud/) explains how to integrate GitLab.com with Jira Cloud._\n\nThe advantages of a GitLab Jira integration are clear:\n\n* One GitLab project integrates with all the Jira projects in a single Jira instance. \n* Quickly navigate to Jira issues from GitLab. \n* Detect and link to Jira issues from GitLab commits and merge requests. \n* Log GitLab events in the associated Jira issue. \n* Automatically close (transition) Jira issues with GitLab commits and merge requests.\n\nHere's a step-by-step guide of everything you need to know to achieve a GitLab Jira integration.\n\n## Pre-configuration\n\nAs you approach configuring your GitLab project to Jira, you can choose from two options that best fit your company or organization's needs.  You can either:\n\n* Use a service template by having a GitLab administrator provide default values for configuring integrations at the project level. When enabled, the defaults are applied to all projects that do not already have the integration enabled or do not otherwise have custom values enabled. The Jira integration values are all pre-filled on each project's configuration page for jira integration. 
If you disable the template, these values no longer appear as defaults, while any values already saved for an integration remain unchanged.\n\n* Configure integrations at a specific project level that will contain custom values specific to that project and that project alone.\n\nIt should be noted that each GitLab project can be configured to connect to an entire Jira instance. That means one GitLab project can interact with all Jira projects in that instance, once configured. Therefore, you will not have to explicitly associate a GitLab project with any single Jira project.\n\nGitLab offers several different options that allow you to integrate Jira in a way that best fits you and your team's needs based on how you’ve set up your Jira software. Let’s take a deeper look into how to set-up each of these available options.\n\n## How to configure Jira\n\nThe first step in setting up your Gitlab Jira integration is having your Jira configuration in order. \n\n**Jira Server** supports basic authentication. When connecting, a username and password are required. Note that connecting to Jira Server via CAS is not possible. Set up a user in Jira Server first and then proceed to Configuring GitLab.\n\n**Jira Cloud** supports authentication through an API token, and in order to begin the process you need to start by creating one within Jira. When connecting to Jira Cloud, an email and API token are required. Set up a user in Jira Cloud first and then proceed to Configuring GitLab. \n\nCreate an API token here: https://id.atlassian.com/manage-profile/security/api-tokens  \n\n* Log in to id.atlassian.com with your email address. 
It is important that the user associated with this email address has write access to projects in Jira\n\n* Click Create API token.\n\n![Create API Token in Jira](https://about.gitlab.com/images/blogimages/atlassianjira/createjiratoken.png){: .shadow.medium.center}\nJira API token creation\n{: .note.text-center}\n\n* Click Copy, or click View and write down the new API token. It is required when configuring GitLab.\n\n![Copy API Token](https://about.gitlab.com/images/blogimages/atlassianjira/copyjiratoken.png){: .shadow.medium.center}\nJira API token copy to clipboard\n{: .note.text-center}\n\n## How to configure GitLab\n\nAs mentioned above, you can begin setting up the Jira integration either by using a service template that defaults all GitLab projects to pre-fill Jira values or you can set up at an individual project level. \n\nTo set up a service template:\n\n* 1a. Navigate to the Admin Area > Service Templates and choose the Jira service template.\n\n![GitLab Service Templates](https://about.gitlab.com/images/blogimages/atlassianjira/GitLabServiceTemplates.png){: .shadow.medium.center}\nGitLab Service Templates\n{: .note.text-center}\n\n2a. For each project, you will still need to configure the issue tracking URLs by replacing :issues_tracker_id in the above screenshot with the ID used by your external issue tracker.\n\n![Issue Tracker ID](https://about.gitlab.com/images/blogimages/atlassianjira/issuetrackerid.png){: .shadow.medium.center}\nIssue Tracker ID\n{: .note.text-center}\n\nTo set up a individual project template:\n\n* 1b. To enable the Jira integration in a project, navigate to the Integrations page and click the Jira service.\n\n![Enable Jira Integration](https://about.gitlab.com/images/blogimages/atlassianjira/enablejiraintegration.png){: .shadow.medium.center}\nEnable Jira Integration\n{: .note.text-center}\n\n* 2b. Select a Trigger action. 
This determines whether a mention of a Jira issue in GitLab commits, merge requests, or both, should link the Jira issue back to that source commit/MR and transition the Jira issue, if indicated.\n\n![Select Trigger Action](https://about.gitlab.com/images/blogimages/atlassianjira/selecttriggeraction.png){: .shadow.medium.center}\nSelect Trigger Action\n{: .note.text-center}\n\n* 3b. To include a comment on the Jira issue when the above reference is made in GitLab, check Enable comments.\n\n* 3c.  Enter the further details on the page as described in the following table:\n\n| Field | Description |\n|-------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|\n| Web URL | The base URL to the Jira instance web interface which is being linked to this GitLab project. E.g.,  https://jira.example.com. |\n| Jira API URL | The base URL to the Jira instance API. Web URL value will be used if not set. E.g.,  https://jira-api.example.com. Leave this field blank (or use the same value of Web URL) if using Jira Cloud.|\n| Username or Email | Use username for Jira Server or email for Jira Cloud |\n| Transition ID | Required for closing Jira issues via commits or merge requests. This is the ID of a transition in Jira that moves issues to a desired state. If you insert multiple transition IDs separated by , or;, the issue is moved to each state, one after another, using the given order. 
(See below for obtaining a transition ID) |\n\nIn order to obtain a transition ID, do the following:\n* By using the API, with a request like https://yourcompany.atlassian.net/rest/api/2/issue/ISSUE-123/transitions using an issue that is in the appropriate “open” state\n\n*Note: The transition ID may vary between workflows (e.g., bug vs. story), even if the status you are changing to is the same.*\n\n![Transition ID](https://about.gitlab.com/images/blogimages/atlassianjira/transitionid.png){: .shadow.medium.center}\nTransition ID\n{: .note.text-center}\n\nYour GitLab project can now interact with all Jira projects in your instance and the project now displays a Jira link that opens the Jira project.\n\nWhen you have configured all settings, click **Test settings and save changes.** \n\n![Test settings and save changes](https://about.gitlab.com/images/blogimages/atlassianjira/testsettingsandsavechanges.png){: .shadow.medium.center}\nTest settings and save changes\n{: .note.text-center}\n\nIt should be noted that you can only display issues from a single Jira project within a given GitLab project.\n\nThe integration is now **activated:**\n\n![Active Jira Integration](https://about.gitlab.com/images/blogimages/atlassianjira/activeintegration.png){: .shadow.medium.center}\nActive Jira Integration\n{: .note.text-center}\n\n## Jira Issues\n\nBy now you should have [configured Jira](https://docs.gitlab.com/ee/integration/jira/index.html#configuring-jira) and enabled the [Jira service in GitLab](https://docs.gitlab.com/ee/integration/jira/index.html#configuring-gitlab). 
If everything is set up correctly you should be able to reference and close Jira issues by just mentioning their ID in GitLab commits and merge requests.\n\nJira issue IDs must be formatted in uppercase for the integration to work.\n\n### 1.How to reference Jira issues\n\nWhen GitLab project has Jira issue tracker configured and enabled, mentioning Jira issue in GitLab will automatically add a comment in Jira issue with the link back to GitLab. This means that in comments in merge requests and commits referencing an issue, e.g., PROJECT-7, will add a comment in Jira issue in the format:\n\nUSER mentioned this issue in RESOURCE_NAME of [PROJECT_NAME|LINK_TO_COMMENT]:\nENTITY_TITLE\n\n* USER A user that mentioned the issue. This is the link to the user profile in GitLab.\n* LINK_TO_THE_COMMENT Link to the origin of mention with a name of the entity where Jira issue was mentioned.\n* RESOURCE_NAME Kind of resource which referenced the issue. Can be a commit or merge request.\n* PROJECT_NAME GitLab project name.\n* ENTITY_TITLE Merge request title or commit message first line.\n\n![Reference Jira issues](https://about.gitlab.com/images/blogimages/atlassianjira/issuelinks.png){: .shadow.medium.center}\nReference Jira issues\n{: .note.text-center}\n\nFor example, the following commit will reference the Jira issue with PROJECT-1 as its ID:\n\ngit commit -m \"PROJECT-1 Fix spelling and grammar\"\n\nClosing Jira Issues\n\nJira issues can be closed directly from GitLab when you push code by using trigger words in commits and merge requests. 
When a commit which contains the trigger word followed by the Jira issue ID in the commit message is pushed, GitLab will add a comment in the mentioned Jira issue and immediately close it (provided the transition ID was set up correctly).\n\nThere are currently three trigger words, and you can use either one to achieve the same goal:\n* Resolves PROJECT-1\n* Closes PROJECT-1\n* Fixes PROJECT-1\n\nwhere PROJECT-1 is the ID of the Jira issue.\n\nNotes:\n\n* Only commits and merges into the project’s default branch (usually main or master) will close an issue in Jira. You can change your projects default branch under project settings.\n\n* The Jira issue will not be transitioned if it has a resolution.\n\nLet’s consider the following example:\n\n* For the project named PROJECT in Jira, we implemented a new feature and created a merge request in GitLab.\n* This feature was requested in Jira issue PROJECT-7 and the merge request in GitLab contains the improvement\n* In the merge request description we use the issue closing trigger Closes PROJECT-7.\n* Once the merge request is merged, the Jira issue will be automatically closed with a comment and an associated link to the commit that resolved the issue.\n\nIn the following screenshot you can see what the link references to the Jira issue look like.\n\n![GitLab link references](https://about.gitlab.com/images/blogimages/atlassianjira/linkreferences.png){: .shadow.medium.center}\nGitLab link references\n{: .note.text-center}\n\nOnce this merge request is merged, the Jira issue will be automatically closed with a link to the commit that resolved the issue.\n\n![Jira Issue auto closes when GitLab MR merges](https://about.gitlab.com/images/blogimages/atlassianjira/jiraautoclose.png){: .shadow.medium.center}\nJira Issue auto closes when GitLab MR merges\n{: .note.text-center}\n\n## Development Panel Integration Set-Up\n\n### A. 
Jira DVCS configuration\n\nWhen using the Jira DVCS configuration, there are several different configurations you can make that are dependent on how your Jira/GitLab instances are managed.\n\n* If you are using self-managed GitLab, make sure your GitLab instance is accessible by Jira.\n* If you’re connecting to Jira Cloud, ensure your instance is accessible through the internet.\n* If you are using Jira Server, make sure your instance is accessible however your network is set up.\n\n### B. GitLab account configuration for DVCS\n\n* In GitLab, create a new application to allow Jira to connect with your GitLab account.\nWhile signed in to the GitLab account that you want Jira to use to connect to GitLab, click your profile avatar at the top right, and then click Settings > Applications. Use the form to create a new application.\n\n* In the Name field, enter a descriptive name for the integration, such as Jira.\nFor the Redirect URI field, enter https://\u003Cgitlab.example.com>/login/oauth/callback, replacing \u003Cgitlab.example.com> with your GitLab instance domain. For example, if you are using GitLab.com, this would be https://gitlab.com/login/oauth/callback.\nNote: If using a GitLab version earlier than 11.3, the Redirect URI must be https://\u003Cgitlab.example.com>/-/jira/login/oauth/callback. If you want Jira to have access to all projects, GitLab recommends that an administrator create the application.\n\n![Admin Creates Integration](https://about.gitlab.com/images/blogimages/atlassianjira/admincreates.png){: .shadow.medium.center}\nAdmin Creates Integration\n{: .note.text-center}\n\n* Check API in the Scopes section and uncheck any other checkboxes.\n\n* Click Save application. GitLab displays the generated Application ID and Secret values. 
Copy these values, which you will use in Jira.\n\n*Tip: To ensure that regular user account maintenance doesn’t impact your integration, create and use a single-purpose jira user in GitLab.*\n\n## Jira DVCS Connector setup\n\nNote: If you’re using GitLab.com and Jira Cloud, we recommend you use the [GitLab for Jira app](https://docs.gitlab.com/ee/integration/jira/index.html), unless you have a specific need for the DVCS Connector.\n\n* Ensure you have completed the [GitLab configuration](https://docs.gitlab.com/ee/integration/jira/index.html).\n\n![Check api in Applications](https://about.gitlab.com/images/blogimages/atlassianjira/checkapi.png){: .shadow.medium.center}\nCheck api in Applications\n{: .note.text-center}\n\n![Application was created successfully](https://about.gitlab.com/images/blogimages/atlassianjira/applicationsuccessful.png){: .shadow.medium.center}\nApplication was created successfully\n{: .note.text-center}\n\n* If you’re using Jira Server, go to Settings (gear) > Applications > DVCS accounts. If you’re using Jira Cloud, go to Settings (gear) > Products > DVCS accounts.\n\n![Go to DVCS in Settings](https://about.gitlab.com/images/blogimages/atlassianjira/dvcssettings.png){: .shadow.medium.center}\nGo to DVCS in Settings\n{: .note.text-center}\n\n* Click Link GitHub Enterprise account to start creating a new integration. 
(We’re pretending to be GitHub in this integration, until there’s additional platform support in Jira.)\n\n![Click Link to start new integration](https://about.gitlab.com/images/blogimages/atlassianjira/dvcsaccount.png){: .shadow.medium.center}\nClick Link to start new integration\n{: .note.text-center}\n\n* Complete the form:\nSelect GitHub Enterprise for the Host field.\nIn the Team or User Account field, enter the relative path of a top-level GitLab group that you have access to, or the relative path of your personal namespace.\n\n![Add new account](https://about.gitlab.com/images/blogimages/atlassianjira/addnewaccount.png){: .shadow.medium.center}\nAdd new account\n{: .note.text-center}\n\nIn the Host URL field, enter https://\u003Cgitlab.example.com>/, replacing \u003Cgitlab.example.com> with your GitLab instance domain. For example, if you are using GitLab.com, this would be https://gitlab.com/.\n\n*Note: If using a GitLab version earlier than 11.3 the Host URL value should be https://\u003Cgitlab.example.com>/-/jira*\n\nFor the Client ID field, use the Application ID value from the previous section.\n\nFor the Client Secret field, use the Secret value from the previous section.\n\nEnsure that the rest of the checkboxes are checked.\n\n* Click Add to complete and create the integration.\nJira takes up to a few minutes to know about (import behind the scenes) all the commits and branches for all the projects in the GitLab group you specified in the previous step. These are refreshed every 60 minutes.\n\nIn the future, we plan on implementing real-time integration. 
If you need to refresh the data manually, you can do this from the Applications -> DVCS accounts screen where you initially set up the integration:\n\n![Refresh data manually](https://about.gitlab.com/images/blogimages/atlassianjira/refreshdata.png){: .shadow.medium.center}\nRefresh data manually\n{: .note.text-center}\n\nTo connect additional GitLab projects from other GitLab top-level groups (or personal namespaces), repeat the previous steps with additional Jira DVCS accounts.\n\nFor troubleshooting your DVCS connection, go to [GitLab Docs](https://docs.gitlab.com/ee/integration/jira/index.html) for more information.\n\n_In our next blog post we'll look at [Usage](https://docs.gitlab.com/ee/integration/jira_development_panel.html#usage)._\n\nCover image by [Mikołaj Idziak](https://unsplash.com/@mikidz) on [Unsplash](https://unsplash.com/photos/nwjRmbXbLgw).\n{: .note.text-left}\n",[9,563,1062],{"slug":1498,"featured":6,"template":734},"gitlab-jira-integration-selfmanaged","content:en-us:blog:gitlab-jira-integration-selfmanaged.yml","Gitlab Jira Integration Selfmanaged","en-us/blog/gitlab-jira-integration-selfmanaged.yml","en-us/blog/gitlab-jira-integration-selfmanaged",{"_path":1504,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1505,"content":1511,"config":1517,"_id":1519,"_type":14,"title":1520,"_source":16,"_file":1521,"_stem":1522,"_extension":19},"/en-us/blog/gitlab-joins-cd-foundation",{"title":1506,"description":1507,"ogTitle":1506,"ogDescription":1507,"noIndex":6,"ogImage":1508,"ogUrl":1509,"ogSiteName":720,"ogType":721,"canonicalUrls":1509,"schema":1510},"GitLab leads the industry forward with the CD Foundation","Today we're proud to announce we've joined the CD Foundation as a founding member.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663648/Blog/Hero%20Images/gitlab-joins-cd-foundation.jpg","https://about.gitlab.com/blog/gitlab-joins-cd-foundation","\n                        {\n        \"@context\": 
\"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab leads the industry forward with the CD Foundation\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sid Sijbrandij\"}],\n        \"datePublished\": \"2019-03-12\",\n      }",{"title":1506,"description":1507,"authors":1512,"heroImage":1508,"date":1514,"body":1515,"category":300,"tags":1516},[1513],"Sid Sijbrandij","2019-03-12","\n\nToday GitLab joined the [CD Foundation](https://cd.foundation/announcement/2019/03/12/the-linux-foundation-announces-new-foundation-to-support-continuous-delivery-collaboration/) as a founding member, to help foster collaboration and educate the industry on how to enable any software development team around the world to implement CI/CD best practices.\n\nAs one of the first to introduce cloud native CI/CD to the industry, we are excited to see so many companies come together to discuss ways to take the industry forward to ensure that code is able to get to production not only quickly, but securely. 
We are looking forward to lending our experience working with millions of developers and thousands of enterprises to drive forward the conversation on best practices and standards to streamline the code delivery promise.\n\n## But there is more to software delivery than CI/CD\n\nWhile it is great there is an eye on the best CI/CD practices, we believe there is more to delivering great software to market than just CI/CD, which is why at GitLab we are focused on providing a single application for the entire DevOps lifecycle.\n\nIt is not only about source code management or CI/CD but also about:\n- [Value stream management](/solutions/value-stream-management/): Understanding your teams' work and their workflow so they can deliver value to customers faster.\n- Operational excellence: Implementing dynamic infrastructure and robust observability to increase uptime and decrease mean time to resolution.\n- Security flow: Building security into every step of your code delivery process, to deliver secure software without slowing the pace of innovation.\n- Monitoring: Automatically monitor metrics so you know how any change in code impacts your production environment.\n\nOur entire [2019 product vision and beyond](/blog/gitlab-product-vision/) is about continuing to build out new capabilities across the entire DevOps lifecycle, to make it easier for enterprises to streamline their processes into one application, helping teams innovate at faster speeds.\n\nAs an open source company, we value the community’s contributions, in helping make GitLab what it is today. 
We look forward to continuing to drive the industry forward in CI/CD, as well as working with you to help deliver your products to market quickly and securely.\n\nPhoto by [YIFEI CHEN](https://unsplash.com/photos/FPMRxKd7MxI?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/spiral-lights?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[9,1062,731],{"slug":1518,"featured":6,"template":734},"gitlab-joins-cd-foundation","content:en-us:blog:gitlab-joins-cd-foundation.yml","Gitlab Joins Cd Foundation","en-us/blog/gitlab-joins-cd-foundation.yml","en-us/blog/gitlab-joins-cd-foundation",{"_path":1524,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1525,"content":1530,"config":1535,"_id":1537,"_type":14,"title":1538,"_source":16,"_file":1539,"_stem":1540,"_extension":19},"/en-us/blog/gitlab-journey-from-azure-to-gcp",{"title":1526,"description":1527,"ogTitle":1526,"ogDescription":1527,"noIndex":6,"ogImage":1220,"ogUrl":1528,"ogSiteName":720,"ogType":721,"canonicalUrls":1528,"schema":1529},"GitLab’s journey from Azure to GCP","GitLab Staff Engineer Andrew Newdigate shares how we completed our migration to Google Cloud Platform, and how we overcame challenges along the way.","https://about.gitlab.com/blog/gitlab-journey-from-azure-to-gcp","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab’s journey from Azure to GCP\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-05-02\",\n      }",{"title":1526,"description":1527,"authors":1531,"heroImage":1220,"date":1532,"body":1533,"category":300,"tags":1534},[772],"2019-05-02","\n\nLast June, we had to face the facts: Our SaaS infrastructure for GitLab.com was not ready for mission-critical workloads, error rates were just too high, and availability was too low. 
To address these challenges, we decided to migrate from Azure to Google Cloud Platform (GCP) and document the journey publicly, end to end. A lot has happened since [we first talked about moving to GCP](/blog/moving-to-gcp/), and we’re excited to share the results.\n\nAt [Google Cloud Next '19](https://cloud.withgoogle.com/next/sf), GitLab Staff Engineer [Andrew Newdigate](/company/team/#suprememoocow) presented our migration experience and the steps we took to make it happen. Migrations seldom go as planned but we hope that others can learn from the process. Check out the video to learn more about our journey from Azure to GCP, and find some of our key takeaways below.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/Ve_9mbJHPXQ\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nThere were several reasons why we decided on the Google Cloud Platform. One top priority was that we wanted GitLab.com to be suitable for mission-critical workloads, and GCP offered the performance and consistency we needed. A second reason is that we believe [Kubernetes](/solutions/kubernetes/) is the future, especially with so much development geared toward [cloud native](/topics/cloud-native/). Another priority was price. For all of these reasons and more, Google was the clear choice as a partner going forward.\n\nOur company values are important to us and we apply them to all aspects of our work and our migration from Azure to GCP is no exception.\n\n## Three core values guided this project:\n\n###  Efficiency\n\nAt GitLab, [we love boring solutions](https://handbook.gitlab.com/handbook/values/#boring-solutions). The goal of the project was really simple: Move GitLab.com to GCP. 
We wanted to find the least complex and most straightforward solution to achieve this goal.\n\n### Iteration\n\nWe focus on shipping the [minimum viable change](https://handbook.gitlab.com/handbook/values/#minimal-viable-change-mvc) and work in steps. When we practice iteration, we get feedback faster, we’re able to course-correct, and we reduce cycle times.\n\n### Transparency\n\nWe work [publicly by default](https://handbook.gitlab.com/handbook/values/#public-by-default), which is why we made [this project accessible to everyone](https://gitlab.com/gitlab-com/migration/) and [documented our progress](https://docs.google.com/document/d/1p3Brri44_SKyakViKB-LGWCmCcwILW6z2A8a8eWFyFc/edit?usp=sharing) along the way.\n\n## How we did it\n\nLooking for the simplest solution, we considered whether we could just stop the whole site: Copy all the data from Azure to GCP, switch the DNS over to point to GCP, and then start everything up again. The problem was that we had too much data to do this within a reasonable time frame. Once we shut down the site, we'd need to copy all the data between two cloud providers, and once the copy was complete, we'd need to verify all the data (about half a petabyte) and make sure it was correct. This plan meant that GitLab.com could be down for _several days_, and considering that thousands and thousands of people rely on GitLab on a daily basis, this wouldn’t work.\n\n![GitLab Geo diagram](https://about.gitlab.com/images/gitlab_ee/gitlab_geo_diagram_migrate.png){: .medium.center}\n\nWe went back to the drawing board. We were working on another feature called [Geo](https://docs.gitlab.com/ee/administration/geo/index.html) which allows for full, read-only mirrors of GitLab instances. 
Besides browsing the GitLab UI, Geo instances can be used for cloning and fetching projects as well as for a planned failover to migrate GitLab instances.\n\nWe hoped that by taking advantage of the replication capabilities we were building for Geo, we could migrate the entire GitLab.com site to a secondary instance in GCP. The process might have taken weeks or months, but thankfully the site would be available throughout the synchronization process. Once all the data was synchronized to GCP, we could verify it and make sure it was correct. Finally, we could just promote the GCP environment and make it our new primary.\n\nThis new plan had many advantages over the first one. Obviously, GitLab.com would be up during the synchronization and we would only have a short period of downtime, maybe an hour or two, rather than weeks. We could do full QA, load testing, and verify all data before the failover.\n\n>\"If it could work for us on GitLab.com, it would pretty much work for any other customer who wanted to use Geo. We could be confident in that.\" - Andrew Newdigate, Infrastructure Architect at GitLab\n\n![Helm charts](https://about.gitlab.com/images/blogimages/gitlab-journey-from-azure-to-gcp/helm_charts.png){: .medium.center}\n\nWe were also working on another major project to install and run GitLab on Kubernetes. Much like Omnibus is a package installer for installing GitLab _outside_ a Kubernetes environment, GitLab’s helm charts [install GitLab inside a Kubernetes environment](https://docs.gitlab.com/charts/). 
The plan evolved to use helm charts to install GitLab in GCP while still using Geo for replication.\n\nIt became apparent there were problems with this approach as we went along:\n\n*   The changes we needed to make to the application to allow it to become fully cloud native were extensive and required major work.\n*   The timeframes of the GCP migration and cloud native projects wouldn’t allow us to carry them out simultaneously.\n\nWe ultimately decided it would be better to postpone the move to Kubernetes until after migration to GCP.\n\nWe went to the next iteration and decided to use Omnibus to provision the new environment. We also migrated all file artifacts, including CI Artifacts, Traces (CI log files), file attachments, LFS objects and other file uploads to [Google Cloud Storage](https://cloud.google.com/storage/) (GCS), moving about 200TB of data off our Azure-based file servers into GCS. Doing this reduced the risk and the scale of the Geo migration.\n\nThe steps for the migration were now fairly straightforward:\n\n*   Set up a Geo secondary in GCP.\n*   Provision the new environment with Omnibus.\n*   Replicate all the data from GitLab.com in Azure to GCP.\n*   Test the new environment and verify all the data is correct.\n*   Failover to the GCP environment and promote it to primary.\n\nThere was only one major unknown left in this plan: The actual failover operation itself.\n\nUnfortunately, **Geo didn’t support a failover operation**, and nobody knew exactly how to do it. 
It was essential that we executed this perfectly, so we used our value of iteration to get it right.\n\n![GitLab failover procedure issue template](https://about.gitlab.com/images/blogimages/gitlab-journey-from-azure-to-gcp/issue_template.png){: .medium.center}\n\n*   We set up the failover procedure as an issue template in the GitLab migration issue tracker with each step as a checklist item.\n*   Every time we practiced, we created a new issue from the template and followed the checklist step by step.\n*   After each failover, we would review and consider how we could improve the process.\n*   We would submit these changes as merge requests to the issue template.\n\nThe merge requests were thoroughly reviewed before being approved by the team and through this very tight, iterative feedback loop, the checklist grew to cover every possible scenario we experienced. In the beginning, things almost never went according to plan, but with each iteration, we got better. In the end, there were _over 140 changes_ in that document before we felt confident enough to move forward with the failover. We let Google know and an amazing team was assembled to help us. The failover went smoothly and we didn't experience any major problems.\n\n## Results\n\nGoing back to the goals of the project: Did we make GitLab.com suitable for mission-critical workloads? Firstly, let's consider availability on GitLab.com.\n\n![GitLab Pingdom chart](https://about.gitlab.com/images/blogimages/gitlab-journey-from-azure-to-gcp/errors_per_day.png){: .shadow.medium.center}\n\nThis [Pingdom](https://www.pingdom.com/) graph shows the number of errors we saw per day, first in Azure and then in GCP. 
The average for the pre-migration period was 8.2 errors per day, while post-migration it’s down to **just one error a day**.\n\n![GitLab availability](https://about.gitlab.com/images/blogimages/gitlab-journey-from-azure-to-gcp/gitlab_availability.png){: .shadow.medium.center}\n\nLeading up to the migration, our availability was 99.61 percent. [In our October update](/blog/gitlab-com-stability-post-gcp-migration/) we were at 99.88 percent. As of April 2019, we've improved to **99.93 percent** and are on track to reach our target of 99.95 percent availability.\n\n![GitLab latency chart](https://about.gitlab.com/images/blogimages/gitlab-journey-from-azure-to-gcp/latency.png){: .shadow.medium.center}\n\nThis latency histogram compares the site performance of GitLab.com before and after moving to GCP. We took data for one week before the migration and one week after the migration. The GCP line shows us that the latencies in GCP drop off quicker, which means GitLab.com is not only faster, it’s more predictable, with fewer outlier values taking an unacceptably long time.\n\n[GitLab users have also noticed the increased stability](https://www.reddit.com/r/gitlab/comments/9f71nq/thanks_gitlab_team_for_improving_the_stability_of/), which is an encouraging sign that we've taken steps in the right direction.\n\nIt's important to note that these improvements can't be attributed to the migration alone – we explore some other contributing factors in [our October update](/blog/gitlab-com-stability-post-gcp-migration/).\n\n\n## What we learned\n\n* Having this amount of visibility into a large-scale migration project is pretty unusual, but it gave us an opportunity to put our values to the test. By opening our documentation to the world, we can collaborate and help others on their own migration journey.\n*  Working by our values gave us the ability to get the quick feedback we needed. 
Even though we weren’t able to use GitLab on Kubernetes during the migration, we course-corrected and came up with the right solutions.\n* We were able to see exactly how Google developers work and got an up-close look into how one of the fastest-moving companies in the world actually manages its [DevOps lifecycle](/topics/devops/). This knowledge will have a long-term impact on GitLab and how we support these organizations in the future.\n\nIf you would like to learn more about how we migrated to GCP, feel free to take a look at the **[issue tracker](https://gitlab.com/gitlab-com/migration/)** and our **[project documentation](http://bit.ly/2UrlU4s)**.\n",[1147,9,1229,859],{"slug":1536,"featured":6,"template":734},"gitlab-journey-from-azure-to-gcp","content:en-us:blog:gitlab-journey-from-azure-to-gcp.yml","Gitlab Journey From Azure To Gcp","en-us/blog/gitlab-journey-from-azure-to-gcp.yml","en-us/blog/gitlab-journey-from-azure-to-gcp",{"_path":1542,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1543,"content":1549,"config":1556,"_id":1558,"_type":14,"title":1559,"_source":16,"_file":1560,"_stem":1561,"_extension":19},"/en-us/blog/gitlab-on-vmware-cloud-marketplace",{"title":1544,"description":1545,"ogTitle":1544,"ogDescription":1545,"noIndex":6,"ogImage":1546,"ogUrl":1547,"ogSiteName":720,"ogType":721,"canonicalUrls":1547,"schema":1548},"GitLab for Cloud Native Transformation on VMware Marketplace","Guest authors from VMware share how to accelerate your software delivery process in just a few clicks with Bitnami and GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680841/Blog/Hero%20Images/bitnami-gitlab.png","https://about.gitlab.com/blog/gitlab-on-vmware-cloud-marketplace","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Enterprise Edition now available for VMware Cloud Marketplace users\",\n        \"author\": 
[{\"@type\":\"Person\",\"name\":\"Raquel Campuzano\"}],\n        \"datePublished\": \"2019-10-11\",\n      }",{"title":1550,"description":1545,"authors":1551,"heroImage":1546,"date":1553,"body":1554,"category":300,"tags":1555},"GitLab Enterprise Edition now available for VMware Cloud Marketplace users",[1552],"Raquel Campuzano","2019-10-11","\n\nHave you ever tried to choose from an extensive list of developer tools and wondered what you should do next? You’re not alone. There are hundreds of solutions to choose from, which can make it challenging to select the right solution and deploy.\n\nNow, GitLab and Bitnami have partnered to offer VMware users [GitLab](https://marketplace.cloud.vmware.com/services/details/129dc4e9-191d-405f-ab4d-803d56f366a9) in the VMware Cloud Marketplace. This version package is free, fully functional, and easy to [upgrade to an enterprise plan](https://docs.bitnami.com/vmware-marketplace/apps/gitlab-ee/get-started/license/).\n\n### Reduce costs and avoid security risks\nAs the industry leader in application packaging, Bitnami helped GitLab create an easy, click-to-deploy, open source solution. The GitLab Enterprise Edition (CORE) Virtual Appliance certified by Bitnami is an up-to-date and secure image that includes the latest versions of the application, its components, and the most recent security fixes. You can run GitLab with confidence; Bitnami’s automated pipeline and tools for building and testing applications ensure this application can run on any platform without issues. If you experience any problems deploying the solution, you can contact the [Bitnami Support team](https://community.bitnami.com/c/gitlab) with your questions.\n\n### Run on VMware infrastructure in a few clicks\nTo make GitLab available in the [VMware Cloud Marketplace](https://marketplace.cloud.vmware.com/services/details/129dc4e9-191d-405f-ab4d-803d56f366a9), GitLab placed its trust in Bitnami’s expertise in packaging. 
GitLab users now have the ability to run the latest version on their VMware infrastructure in a few clicks.\n\n### Some of the key benefits of GitLab's marketplace listing:\n* GitLab includes a built-in container registry and Kubernetes integration, enabling you to quickly create a [continuous integration (CI)](/solutions/continuous-integration/) pipeline with Kubernetes. Learn more about [creating a CI/CD pipeline with GitLab and Kubernetes](https://docs.bitnami.com/tutorials/create-ci-cd-pipeline-gitlab-kubernetes/).\n* By deploying GitLab on a VMware cloud server, you can add a budget- and resource-checking stage to your pipeline. This allows you to implement best practices into your continuous deployment (CD) process and control the consumption and costs of your application deployments.\n* Premium features such as code quality and performance testing, static and dynamic application security testing, package dependency analysis, and automated tests for vulnerabilities enable you to identify and remediate issues and security breaches from development to monitoring stages. Learn more about [building misconfiguration and vulnerability checks into your CI/CD pipeline to achieve continuous security](https://thenewstack.io/how-continuous-security-can-solve-the-cloud-protection-conundrum/).\n\n### How do you get started? We’ll show you how\nIn order to upgrade your GitLab Core version to enjoy the Enterprise Edition features, take the following steps:\n\n1) First log into the [VMware Cloud Marketplace](https://marketplace.cloud.vmware.com/services/details/129dc4e9-191d-405f-ab4d-803d56f366a9) and browse for the “GitLab Enterprise (CORE) Virtual Appliance” solution.\n\n2) Then click to view the details. 
Note: The GitLab Enterprise (CORE) Virtual Appliance is available in the [VMware Cloud Marketplace](https://marketplace.cloud.vmware.com/services/details/129dc4e9-191d-405f-ab4d-803d56f366a9) in two deployment options: VMware Cloud on AWS (VMC) or vCloud Director (VCD).\n\n![GitLab is available in the VMware Cloud Marketplace in two deployment options: VMware Cloud on AWS (VMC) or vCloud Director (VCD)](https://about.gitlab.com/images/blogimages/gitlabonvmware1.png){: .shadow.medium.center}\n\n3) To deploy the application both on VMC or VCD, you need to first subscribe to the image, as shown below:\n\n![To deploy the application both on VMC or VCD, you need to first subscribe, as shown below](https://about.gitlab.com/images/blogimages/subscribetovmwmarketplace.png){: .shadow.medium.center}\n\n4) Then, select the platform where you wish to deploy it, as shown below:\n\n![After subscribing, select the VMC or VCD platform where you wish to deploy](https://about.gitlab.com/images/blogimages/deploytovmwplatform.png){: .shadow.medium.center}\n\n5) Depending on the platform you select, you will be redirected to the vSphere Client or vCloud Director platform. Follow these instructions to launch a [GitLab Enterprise (CORE) Virtual Appliance using the vSphere Client](https://docs.bitnami.com/vmware-marketplace/apps/gitlab-ee/get-started/get-started-vmware-cloud/) or as a [vApp from VMware vCloud Director](https://docs.bitnami.com/vmware-marketplace/get-started-vcloud-director/).\n\n6) When you deploy the [GitLab Enterprise (CORE) Virtual Appliance certified by Bitnami](https://marketplace.cloud.vmware.com/services/details/129dc4e9-191d-405f-ab4d-803d56f366a9), you get the free and fully functional [Core version of GitLab](/pricing/#self-managed), which is easily upgradable to Starter, Premium, or Ultimate. To upgrade, sign into the application, navigate to the “Admin Area,” and then select the “License” menu option. 
As you can see in the image below, you now have the option to either upload your `.gitlab-license` file or start a [free trial](/free-trial/).\n\nNote: If you start a free trial, you will be able to try all the paid features for the duration of the trial. After that time, your server will revert to Core features.\n{: .alert .alert-info}\n\n![To upgrade, sign into the application, navigate to the “Admin Area,” and then select the “License” menu option](https://about.gitlab.com/images/blogimages/vmwmarketplacefreetrial.png){: .shadow.medium.center}\n\n\n7) Once you activate your license, paid features will be enabled as shown below and you can start deploying with confidence.\n\n![Once you activate your license, paid features will be enabled](https://about.gitlab.com/images/blogimages/vmwpremiumfeatures.png){: .shadow.medium.center}\n\n## Conclusion\n\nWhat used to be a complex task is now just a few clicks, without compromising your budget and your security. Enjoy all the advantages of the GitLab in the VMware Cloud Marketplace and accelerate your software delivery process by leveraging the simplicity of the Bitnami experience.\n\n[Get started now](https://marketplace.cloud.vmware.com/services/details/129dc4e9-191d-405f-ab4d-803d56f366a9). If you have any questions, feel free to reach out to the Bitnami Support team!\n\n### About the guest author\n\nRaquel Campuzano is a Content Marketing Specialist at Bitnami, now part of VMware. She is in charge of managing the creation of technical content that allows developers to deploy awesome software everywhere. Raquel was part of the Bitnami team as technical writer. Her know-how creating tutorials, product documentation, and videos gave her the ability to identify in which stage of developer’s journey the user experience can be improved.\n\nPrevious to Bitnami, she led the communication and marketing strategy for Redborder (cybersecurity) and Oklan (network and hosting services). 
She is also a member of Ping a Programadoras, a non-profit organisation focused on promoting women’s inclusion in programming and software development.\n",[109,9,563,232],{"slug":1557,"featured":6,"template":734},"gitlab-on-vmware-cloud-marketplace","content:en-us:blog:gitlab-on-vmware-cloud-marketplace.yml","Gitlab On Vmware Cloud Marketplace","en-us/blog/gitlab-on-vmware-cloud-marketplace.yml","en-us/blog/gitlab-on-vmware-cloud-marketplace",{"_path":1563,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1564,"content":1570,"config":1576,"_id":1578,"_type":14,"title":1579,"_source":16,"_file":1580,"_stem":1581,"_extension":19},"/en-us/blog/gitlab-operator-red-hat-certification",{"title":1565,"description":1566,"ogTitle":1565,"ogDescription":1566,"noIndex":6,"ogImage":1567,"ogUrl":1568,"ogSiteName":720,"ogType":721,"canonicalUrls":1568,"schema":1569},"GitLab Operator certified by Red Hat OpenShift","The GitLab Operator is now certified by Red Hat’s OpenShift standards, allowing users to install GitLab directly on an OpenShift cloud cluster.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682717/Blog/Hero%20Images/bi_worldwise_casestudy_image.png","https://about.gitlab.com/blog/gitlab-operator-red-hat-certification","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Operator certified by Red Hat OpenShift\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dilan Orrino\"}],\n        \"datePublished\": \"2023-05-11\",\n      }",{"title":1565,"description":1566,"authors":1571,"heroImage":1567,"date":1573,"body":1574,"category":729,"tags":1575},[1572],"Dilan Orrino","2023-05-11","\nGitLab and Red Hat have been technology partners for more than two years, collaborating on a number of projects. 
GitLab first started its integration with Red Hat’s OpenShift cloud-based container platform by introducing the [GitLab Runner Operator](https://catalog.redhat.com/software/container-stacks/detail/5e9877e96c5dcb34dfbb1ac9) in GitLab Version 13.3. The Runner Operator offered the capability to run pipeline tasks from an external GitLab instance to OpenShift clusters.\n\nOur next step was to more closely integrate with the OpenShift platform and alleviate the need to require the GitLab instance to run external to OpenShift-based infrastructure. [The GitLab Operator](https://docs.gitlab.com/operator/) is now certified by Red Hat, which enables the capability to install an instance of GitLab inside of an OpenShift cloud cluster.\n\n## Benefits of GitLab with Red Hat OpenShift\n\nThe [Operator framework](https://operatorframework.io/about/) offers many benefits, but the main reason we identified is that it would allow us to run a self-managed instance of GitLab inside an OpenShift cluster. The GitLab DevSecOps platform can be operated on the same trusted infrastructure as other applications and services within a customer's organization. \n\nThe Operator framework also delivers a streamlined installation and seamless version upgrades. As the GitLab Operator continues to be developed, we hope to add other elements of the Operator framework such as backup and recovery, comprehensive metrics, and auto-tuning and auto-scaling. GitLab plans to align our future cloud-native deployment model behind our Operator.\n\n![Capability model](https://about.gitlab.com/images/blogimages/gitlaboperatorcapabilitymodel.png){: .shadow}\n\n\n## Details of the Red Hat certification\n\nThe Red Hat Certification included aligning our application components with Red Hat’s Universal Base Image (UBI) when deploying through the Red Hat Marketplace. 
The Red Hat Certification also included meeting all of [Red Hat’s policy requirements](https://access.redhat.com/documentation/en-us/red_hat_software_certification/8.61#con-operator-requirements_openshift-sw-cert-policy-products-managed). The certification signifies GitLab being supported on OpenShift in collaboration with Red Hat. The Operator as a deployment method will be available as a recommended choice in Q3, but is available for testing now.\n\n## A technical milestone\n\nThe GitLab application is complex, so building an Operator to deploy it was a technical achievement for the GitLab and Red Hat engineering teams. Completing this operator certification is a significant milestone and gives customers the confidence and assurance that GitLab runs effectively, jointly supported by Red Hat, on OpenShift.\n\n![GitLab Operator install screen](https://about.gitlab.com/images/blogimages/gitlaboperatorinstall.png){: .shadow}\n\n\n## Try the GitLab Operator\n\n[The GitLab Operator](https://docs.gitlab.com/operator/) is available now for testing in the OpenShift console via the embedded OperatorHub, and will be production ready for GitLab instances in Q3 2023. Check out the [catalog listing](https://catalog.redhat.com/software/container-stacks/detail/5ec3fcb08b6f188e53644c0f) for links to documentation and installation instructions. 
For a self-managed free trial to host GitLab on your OpenShift cluster, [submit this form](/free-trial/?hosted=self-managed).\n",[283,731,9],{"slug":1577,"featured":6,"template":734},"gitlab-operator-red-hat-certification","content:en-us:blog:gitlab-operator-red-hat-certification.yml","Gitlab Operator Red Hat Certification","en-us/blog/gitlab-operator-red-hat-certification.yml","en-us/blog/gitlab-operator-red-hat-certification",{"_path":1583,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1584,"content":1589,"config":1594,"_id":1596,"_type":14,"title":1597,"_source":16,"_file":1598,"_stem":1599,"_extension":19},"/en-us/blog/gitlab-pages-update",{"title":1585,"description":1586,"ogTitle":1585,"ogDescription":1586,"noIndex":6,"ogImage":1220,"ogUrl":1587,"ogSiteName":720,"ogType":721,"canonicalUrls":1587,"schema":1588},"Update about GitLab Pages","If you are using GitLab Pages with a custom domain, you may need to update your DNS.","https://about.gitlab.com/blog/gitlab-pages-update","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Update about GitLab Pages\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David Smith\"}],\n        \"datePublished\": \"2018-08-28\",\n      }",{"title":1585,"description":1586,"authors":1590,"heroImage":1220,"date":1591,"body":1592,"category":752,"tags":1593},[1225],"2018-08-28","\n\nAfter completing our move to Google Cloud Platform (GCP) on August 11, 2018, GitLab.com traffic has been served from our new infrastructure in GCP. For GitLab Pages users, we left a proxy in place in Azure to be backwards compatible for those Pages users who had an A record pointing to the IP Address at our Azure location.\n\nWe had planned a graceful window to let people have time to migrate their DNS records.  
In our [July GCP move update](/blog/gcp-move-update/), we referenced the new IP address at GCP that people should use.\n\nIn that transition, users should have moved their DNS records from 52.167.214.135 to 35.185.44.232.\n\nThis week, we started cleanup of parts of our now legacy Azure infrastructure. Unfortunately, that cleanup also caught up the Azure load balancer that had the old 52.167.214.135 IP address for the GitLab pages proxy. We quickly filed a ticket to see if we could reclaim the IP address, but could not be guaranteed that we could get it back when we rebuilt the load balancer. This post is to get the information out for those Pages users who have been affected by this change.\n\n### What you need to know:\n\nIf you are using GitLab Pages with a custom domain AND you have an A record in DNS that points to the old Azure IP, you will need to update your DNS:\n\n|from IP (old)|to IP (new)|\n|",[1147,9,1229,859],{"slug":1595,"featured":6,"template":734},"gitlab-pages-update","content:en-us:blog:gitlab-pages-update.yml","Gitlab Pages Update","en-us/blog/gitlab-pages-update.yml","en-us/blog/gitlab-pages-update",{"_path":1601,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1602,"content":1608,"config":1614,"_id":1616,"_type":14,"title":1617,"_source":16,"_file":1618,"_stem":1619,"_extension":19},"/en-us/blog/gitlab-security-hardening-documentation",{"title":1603,"description":1604,"ogTitle":1603,"ogDescription":1604,"noIndex":6,"ogImage":1605,"ogUrl":1606,"ogSiteName":720,"ogType":721,"canonicalUrls":1606,"schema":1607},"The backstory on GitLab's security hardening documentation","GitLab has detailed documentation about how to harden your instance, now as a part of GitLab itself. 
Here's how it came to be.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665667/Blog/Hero%20Images/built-in-security.jpg","https://about.gitlab.com/blog/gitlab-security-hardening-documentation","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The backstory on GitLab's security hardening documentation\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mark Loveless\"}],\n        \"datePublished\": \"2023-08-01\",\n      }",{"title":1603,"description":1604,"authors":1609,"heroImage":1605,"date":1611,"body":1612,"category":979,"tags":1613},[1610],"Mark Loveless","2023-08-01","\nRecently, my fellow Security team member [Ayoub Fandi](https://gitlab.com/ayofan) released an excellent blog post entitled “[How to harden your self-managed GitLab instance](https://about.gitlab.com/blog/how-to-harden-your-self-managed-gitlab-instance/),” which featured seven steps for helping to lock down your environment. Ayoub’s blog post was based, in part, on early drafts of research into hardening I had been working on. 
I had no idea how long it would take me to reach a point where I could finally say this, but those original drafts are now a [part of GitLab documentation](https://docs.gitlab.com/ee/security/hardening.html).\n\n## Hardening your GitLab instance\nThe goal of the hardening documentation is for DevSecOps teams to be able to apply security controls and make sure your data and your GitLab instance are protected.\n\nHere's what I will cover:\n- the origins of the hardening documentation\n- field research, including a few attack scenarios\n- bringing an instance online\n- insight into when to use hardening, specifically how to do a bit of threat modeling and get the basics sorted before diving deep into full hardening \n\n## Origins of the hardening documentation\nThe idea for creating extensive hardening documentation started with a blog post I wrote three years ago called “[GitLab instance: Security best practices](https://about.gitlab.com/blog/gitlab-instance-security-best-practices/).\" This became a rather popular reference for GitLab customers asking about securing their self-managed instance (and even a SaaS deployment).\n\nAs I participated in other security efforts around Gitlab, such as [FedRAMP certification](https://handbook.gitlab.com/handbook/security/security-assurance/security-compliance/fedramp-compliance/), [compliance requirements](https://handbook.gitlab.com/handbook/security/security-assurance/security-compliance/certifications/), and general [security concerns](https://handbook.gitlab.com/handbook/security/), I realized we needed more support materials focused on the \"Sec\" in DevSecOps.\n\nThe hardening project was born from this -  based largely off of an instance I had loaded up in 2020 and was using at home. During all of this time, from 2020 until now, I took notes, performed tests, had hacker friends and work colleagues poke and prod at this home system, and, then took even more notes. 
No doubt, the information I learned would be a huge benefit to GitLab users so I opted to create recommendations that could be updated frequently and accessible directly from a GitLab instance via the Help menu. \n\n## Field research\nI've been a security professional for well over a couple of decades and have had my own domain online since 1997, using five static IP addresses in my house. This deployment includes web services, a Mastodon instance, and a mail server. These systems have been used by a few close hacker friends during this time as a testbed, a place to converse and exchange ideas, and a great educational environment. A few years ago, I thought it would be fun for this group to have its own private GitLab instance called [Blackhole](https://blackhole.nmrc.org/). We could work on coding projects together, collaborate, and, since I worked at GitLab, I could use it for testing of a standalone instance for certain work-inspired issues.\n\nHaving live servers up with dedicated and static IP addresses means that, yes, these servers are under pretty much constant attack. Because of this clustering of security friends on these systems, we’ve even been targeted by more sophisticated attackers, up to and including nation states. A perfect test environment for real-world attack scenarios.\n\n### Attack scenarios\nLike many in the security field, over the years I’ve seen a number of attacks firsthand, so I made a list of things I needed for hardening against attacks. After doing some [threat modeling](https://handbook.gitlab.com/handbook/security/product-security/application-security/threat-modeling/), I noted the following three areas of concern:\n1. **Opportunistic intruders (okay, who am I kidding, [script kiddies](https://www.techtarget.com/searchsecurity/definition/script-kiddy-or-script-kiddie)).** These types of attacks are typically composed of easy-to-use scripts against known vulnerable applications. This has happened to me. 
Within five minutes of installation of an odd web-based application, while still reading the documentation and wondering if the apparently vulnerable software was actually working, it was compromised. So I had to assume immediate attacks would happen if I loaded this up on a live system exposed to the open internet.\n2. **Layered or chained attacks.** This is when an attacker takes advantage of a particular sub-component that is exposed to the internet, and while the sub-component might not allow for full system access, it could allow for access to another sub-component with access to data. I was determined to disable or secure as much as possible, leaving as few exposed ports or running services as possible.\n3. **Advanced persistent threat ([APT](https://en.wikipedia.org/wiki/Advanced_persistent_threat)) attackers.** They have repeatedly gone after my former employers, and as they learned who their employees were, home systems would become targets, and [mine were no exception](https://www.markloveless.net/blog/2021/6/8/advancing-persistently-against-apt). To this day, I receive an APT attack attempt every few months. Knowing that one tactic of APT attackers is supply chain attacks, having a DevSecOps platform shared by hackers could be seen as a delicious target, so security had to be top of mind.\n\n## Bringing an instance online\nAs I installed Blackhole, I first configured the firewall at the operating system level to close off all of the ports from public access, only allowing access from my internal network. As a rule, my perimeter router allows all traffic in for those five static IP addresses, with each system following strict firewall rules using the operating system’s firewall capabilities. Most of these five systems cannot even talk to each other, or only have the bare minimum connectivity configured to enable functionality. 
As I was monitoring traffic to my public systems via my perimeter router, in less than 10 minutes, I started seeing port scans against Blackhole’s IP address, well before it had even finished installation of the Linux operating system. I was glad the firewall was up and running from the start.\n\nAs GitLab was installed and Blackhole came up in its state as a GitLab instance, I started going through the various settings and making sure that things were locked down as tight as possible. Anything I wasn’t using was disabled. This applied to the underlying operating system as well as the GitLab software itself. When I felt good enough about it, I adjusted the firewall settings to open things up ever so slightly, and the system has been under near constant attack since.\n\n## When to use the hardening documentation\nGitLab is a comprehensive DevSecOps platform that can handle all kinds of security scenarios. GitLab the company uses the product to not only develop the platform, but we also run the company off of it. The feature-rich platform can be configured in many different ways. Keeping that in mind, note that one setup might be set up to be more secure than another simply because of the environment it needs to be included in. There are drastically different configuration choices for an environment that is publicly accessible vs. one that is only accessible from employee workstations, or  a large enterprise with employees located on multiple continents vs. a small business' single server deployment.\n\nHardening, therefore, is dependent on your unique environment, and requires you to understand the threats you need to mitigate against, and account for any regulatory and compliance requirements to which you must adhere. However, there are a few common steps that can lead you through the process.\n\n### Start with the basics\nThe first recommendation is to start with a few basics. 
Make sure you have some ground rules established in your organization such as password standards, software upgrade schedules, and compliance requirements. This will make it easier as you move through the process. Understand the threats your organization has faced in the past, and the potential threats you could face in the future. I wrote a blog post on [threat modeling](https://handbook.gitlab.com/handbook/security/product-security/application-security/threat-modeling/) and [we use it internally](https://handbook.gitlab.com/handbook/security/product-security/application-security/threat-modeling/howto.html) as well.\n\n### Full hardening\nI’d recommend reading [Ayoub’s blog post](https://about.gitlab.com/blog/how-to-harden-your-self-managed-gitlab-instance ) and follow the seven steps he puts forward. In many cases, after you’ve finished Ayoub’s blog post you will have enough to meet your security needs right there. If you need more, delve into the [hardening recommendations documentation](https://docs.gitlab.com/ee/security/hardening.html). Adapt it as needed to meet your organization’s security demands, and explore the possibilities to increase the security of your environment. Note that these recommendations are not limited to just GitLab settings, but also includes a few recommendations for the underlying operating system itself.\n\n### Share your feedback\nIf you have ideas for more security tips and tricks or questions regarding the hardening documentation, please open an issue on GitLab. We’d love to hear from you and welcome feedback and contributions! 
And if you want to learn more about how we do security at GitLab, review the [security section](https://handbook.gitlab.com/handbook/security/) of the handbook.\n",[979,9,573],{"slug":1615,"featured":6,"template":734},"gitlab-security-hardening-documentation","content:en-us:blog:gitlab-security-hardening-documentation.yml","Gitlab Security Hardening Documentation","en-us/blog/gitlab-security-hardening-documentation.yml","en-us/blog/gitlab-security-hardening-documentation",{"_path":1621,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1622,"content":1628,"config":1635,"_id":1637,"_type":14,"title":1638,"_source":16,"_file":1639,"_stem":1640,"_extension":19},"/en-us/blog/gitlab-serverless-with-cloudrun-for-anthos",{"title":1623,"description":1624,"ogTitle":1623,"ogDescription":1624,"noIndex":6,"ogImage":1625,"ogUrl":1626,"ogSiteName":720,"ogType":721,"canonicalUrls":1626,"schema":1627},"Announcing GitLab Serverless deploying to Cloud Run for Anthos","Discover how we're making it easier to deploy serverless workloads on-premise with Anthos.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666851/Blog/Hero%20Images/gitlab-serverless-blog.png","https://about.gitlab.com/blog/gitlab-serverless-with-cloudrun-for-anthos","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Announcing GitLab Serverless deploying to Cloud Run for Anthos\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mayank Tahilramani\"}],\n        \"datePublished\": \"2019-11-19\",\n      }",{"title":1623,"description":1624,"authors":1629,"heroImage":1625,"date":1631,"body":1632,"category":729,"tags":1633},[1630],"Mayank Tahilramani","2019-11-19","\nThis week at Google Cloud Next ’19 UK, Google Cloud grew its Anthos product portfolio with the addition of Cloud Run for Anthos running on-prem. 
I’m excited to share that GitLab has been collaborating with Google Cloud product teams to support this launch and enable customers with CI/CD and [GitLab Serverless](/topics/serverless/) capabilities for quicker and easier adoption of serverless solutions. In the spirit of our partnership, our support for [Cloud Run for Anthos](https://cloud.google.com/run) is a continuation of our collaboration [announced earlier this year at Google Cloud Next ’19 in San Francisco](/blog/running-a-consistent-serverless-platform/), where we showed how you can deploy a serverless function to Cloud Run using the same developer workflow you’re already familiar with in GitLab. Now, we’re looking to bring that same UX and workflow consistency to Cloud Run deployments on Anthos running on-premise. Overall, together, GitLab and Google Cloud are aiming to lower the barrier of adoption for customers looking to architect scalable, cloud native solutions. \n\nHowever, when discussing cloud native, oftentimes ‘public cloud infrastructure’ comes to mind. But when I think of cloud native, I think of the various, modern ways of architecting scalable solutions, backed by managed services to make operations more convenient. Until very recently, infrastructure-centric managed services like Google Kubernetes Engine (GKE), Cloud Run, StackDriver, etc. have been traditionally associated with workloads running within cloud data centers. Given the recent announcements of [Google Cloud Anthos](https://cloud.google.com/blog/products/serverless/knative-based-cloud-run-services-are-ga), Google is clearly broadening the boundaries of cloud native across hybrid and heterogeneous environments, including customer data centers. 
As the infrastructure landscape diversifies, as application development intertwines with abstraction layers of managed services, and as workload flexibility becomes inherent with microservice containerization, the one thing you can rely on staying consistent is GitLab’s developer workflow to supplement all the above. In the context of all things [serverless](/topics/serverless/), let's take a closer look at what’s available today, what we’re still working on, and what that means for our users.\n\n## What’s available today\n\nGitLab serves as a single application for all of [DevOps](/topics/devops/), which includes building, deploying, and managing serverless applications. GitLab serverless enables developers to focus on writing application code without having to worry about Kubernetes or Knative YAML configuration. GitLab provides templates allowing developers to easily build and deploy Knative services that can be deployed to Cloud Run. Here is a [quick video walkthrough on the anatomy of a serverless project hosted in GitLab and deployed to Knative](https://youtu.be/IIM8JWhAbNk?t=210). With Google, you have a few options on how to leverage Cloud Run as a deployment target for GitLab CI/CD. As of this week, you can run Cloud Run in three different flavors: \n\n1. **Cloud Run**: This is a fully managed cloud service powered by Knative for serverless apps. GitLab supports deploying to Cloud Run and the full CI/CD workflow to leverage GitLab Runners to build and test functions. GitLab takes in the [`serverless.yml`](https://docs.gitlab.com/ee/update/removals.html) file within the root of your source code repository to define and deploy to Cloud Run.  \n\n2. **Cloud Run for Anthos running on Google Cloud**: This is a managed deployment of Knative on Anthos GKE clusters running on Google Cloud Platform. This enables you to install a managed Cloud Run deployment on top of your own Kubernetes cluster. 
Similar to above, GitLab also supports deploying to Cloud Run via the full CI/CD workflow, but as of right now, the highest version of Knative supported by GitLab is 0.7. Latest version support for Knative is coming in [GitLab 12.6](/releases/) on Dec. 22, 2019.  \n\n3. **Cloud Run for Anthos running on-premise**: Similar to above, this flavor of Cloud Run enables users to run a managed Cloud Run deployment on top of Anthos GKE On-Prem in your own data center. Currently, Knative v.0.9 is deployed in GKE-OP clusters. GitLab is soon to release support for Knative v0.9 and users can track the progress of this work in [this open issue](https://gitlab.com/gitlab-org/gitlabktl/issues/55) today. If you like what we’re working on, stop by and give us a thumbs up for feedback. So far, internal testing has been very positive and we look forward to formally supporting Cloud Run for Anthos running on-premise in the coming months/releases. The user experience will be almost identical to the prior two use cases listed above as you would expect.\n\n## Where to get started\n\nIf you’re interested in getting started with some sample code, check out our [documentation](https://docs.gitlab.com/ee/update/removals.html) and [sample app project](https://gitlab.com/knative-examples/functions) for reference. Additionally, [here is a walkthrough of deploying a demo app to Cloud Run from GitLab](https://youtu.be/lb_bRRAgEyc?t=1103). 
If you’re looking to get started with Serverless on Google Cloud Platform, [sign up for GitLab.com here](https://gitlab.com/users/sign_up) and then [sign up for $200 additional free GCP credits](https://cloud.google.com/partners/partnercredit/?PCN=a0n60000006Vpz4AAC).\n",[109,9,563,232,1634],"careers",{"slug":1636,"featured":6,"template":734},"gitlab-serverless-with-cloudrun-for-anthos","content:en-us:blog:gitlab-serverless-with-cloudrun-for-anthos.yml","Gitlab Serverless With Cloudrun For Anthos","en-us/blog/gitlab-serverless-with-cloudrun-for-anthos.yml","en-us/blog/gitlab-serverless-with-cloudrun-for-anthos",{"_path":1642,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1643,"content":1649,"config":1656,"_id":1658,"_type":14,"title":1659,"_source":16,"_file":1660,"_stem":1661,"_extension":19},"/en-us/blog/gitops-done-3-ways",{"title":1644,"description":1645,"ogTitle":1644,"ogDescription":1645,"noIndex":6,"ogImage":1646,"ogUrl":1647,"ogSiteName":720,"ogType":721,"canonicalUrls":1647,"schema":1648},"3 Ways to approach GitOps","Learn about how GitLab users can employ GitOps to cover both Kubernetes and non-Kubernetes environments","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669635/Blog/Hero%20Images/gitops-cover.jpg","https://about.gitlab.com/blog/gitops-done-3-ways","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"3 Ways to approach GitOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Saumya Upadhyaya\"},{\"@type\":\"Person\",\"name\":\"Dov Hershkovitch\"}],\n        \"datePublished\": \"2021-04-27\",\n      }",{"title":1644,"description":1645,"authors":1650,"heroImage":1646,"date":1653,"body":1654,"category":752,"tags":1655},[1651,1652],"Saumya Upadhyaya","Dov Hershkovitch","2021-04-27","\n\nThe term [\"GitOps\"](/topics/gitops/) first emerged in the Kubernetes community as a way for organizations to enable Ops teams move at the pace of 
application development. With improved automation and less risk, GitOps is quickly becoming the workflow of choice for infrastructure automation.\n\nAt GitLab, the approach to GitOps goes beyond Kubernetes. Before the buzz around GitOps picked up in the DevOps community, GitLab users and customers were applying GitOps principles to all types of infrastructure, including physical servers, virtual machines, containers, and Kubernetes clusters ([multicloud](/topics/multicloud/) and on-premise).\n\n## What is GitOps?\n\nThere are two main [approaches to GitOps](https://www.gitops.tech/), a push-based approach and a pull-based approach.\n\n- *Push-based approach*: A CI/CD tool pushes the changes to the environment. Applying GitOps via push is consistent with the approach used for application deployment. In this case, deployment targets for a push-based approach are not limited to Kubernetes.\n![push based deployment](https://about.gitlab.com/images/blogimages/gitops-push.png){: .shadow.medium.center}\nHow the push-based approach works for GitOps.\n{: .note.text-center}\n\n- *Pull-based approach*: An agent installed in a cluster pulls changes whenever there is a deviation from the desired configuration. In the pull-based approach, deployment targets are limited to Kubernetes and an agent must be installed in each Kubernetes cluster.\n![pull based deployment](https://about.gitlab.com/images/blogimages/gitops-pull.png){: .shadow.medium.center}\nHow the pull-based approach works for GitOps.\n{: .note.text-center}\n\n## How to employ GitOps principles using GitLab\n\nGitLab supports both of the approaches mentioned above, which can be used with and without a Kubernetes agent. 
Along with the [recently introduced Kubernetes agent](/blog/gitlab-kubernetes-agent-on-gitlab-com/), GitLab supports GitOps principles by supporting a three types of deployment targets and environments: The single application for infrastructure code; configurations using CI/CD for automation; and merge requests for collaboration and controls.\n\nBelow we unpack three methods for applying GitOps principles using GitLab technology.\n\n### Push using manually configured CI/CD release targets\n\nThe infrastructure configurations are stored in git. The user sets up the [supported deployment targets](/install/) and uses the standard CI/CD workflow to push infrastructure changes. To ensure the desired state in the repository is consistent with the environment, CI/CD will need to run on a regular schedule to identify drift and reconcile as required. Manual intervention may be required at times to cater to failed pipelines. Many GitLab users have been using this approach to push infrastructure changes to their test, staging, and production environments.\n\nThe manual push approach is ideal for both Kubernetes and supported non-Kubernetes environments, such as embedded systems, on-premise servers, mainframes, virtual machines, or FaaS offerings.\n\n### Push using Terraform\n\nIn this approach, an out-of-the box [integration with Terraform](https://docs.gitlab.com/ee/user/infrastructure/) helps Terraform users seamlessly implement GitOps workflows using GitLab. Terraform manifests are stored in the Git repository where users can collaborate on changes within the merge requests. The Terraform plan reports can be displayed within the merge requests and the Terraform state can be stored using the GitLab-managed Terraform state backend. 
Everything is integrated into GitLab, which spares users from performing these tasks via third-party tools or integrations.\n\nThe push approach is ideal for both Kubernetes and non-Kubernetes deployment targets that are supported by Terraform.\n\n### Pull using a Kubernetes agent\n\nIn fall 2020, GitLab [introduced a Kubernetes agent](/blog/gitlab-kubernetes-agent-on-gitlab-com/) that initiates a secure web-socket connection from a Kubernetes cluster to a GitLab instance. There is a GitLab server component that polls for any repository changes on the server and informs the agent when there is a deviation between the desired state and the cluster environment. This process helps minimize the load on the cluster and network. Whenever a drift is detected the agent pulls the latest configurations from the git repository and updates the environment accordingly. This GitOps approach requires the Kubernetes agent to be installed on every Kubernetes cluster, which can be done with ease as the GitLab Agent for Kubernetes uses GitOps principles to install and update the agent as required. This GitOps method is ideal for Kubernetes environments only.\n\n![kubernetes agent](https://about.gitlab.com/images/blogimages/gitops-agent.png){: .shadow.medium.center}\nInside the pull-based approach using a Kubernetes agent.\n{: .note.text-center}\n\n### Up next: Push using a Kubernetes agent\n\nGitLab also aims to support GitOps is by using a push approach with a Kubernetes agent. The push based approach using manually configured Kubernetes target attaches a Kubernetes cluster to GitLab through a certificate exchange. This approach leverages the CI/CD workflow for infrastructure automation and is fairly straightforward, but it also introduces risk by opening up a firewall and using cluster admin rights for cluster integration. 
To overcome these challenges while leveraging the CI/CD workflow - the [push-based approach using the Kubernetes agent](https://gitlab.com/groups/gitlab-org/-/epics/5528) aims to reuse the web-socket interface to establish a secure connection between GitLab and the Kubernetes cluster and allows GitLab CI/CD to securely push changes using this interface. When available, this approach would also provide a migration path for users who are currently setting up the Kubernetes integration using a certificate exchange.\n\nThe third approach is ideal for Kubernetes environments only. When available, it can be used in conjuction with the pull-based approach to optimize the GitOps workflow.\n\n## Accelerate the SDLC with GitOps principles\n\nWhether you are using physical, virtual, containers, Kubernetes - on-prem or cloud-based infrastructures – GitLab uses GitOps principles a variety of ways to meet your team wherever it's at. GitLab supports many different options because we understand the typical organization has a mixed IT landscape, with various heterogeneous technologies in a number of different environments.\n\n***What’s your preferred approach to GitOps?*** Drop us a comment.\n\n## Learn more about GitOps at GitLab\n\nRead on to explore how GitLab works with different technologies to deliver a GitOps solution for every company at every stage.\n\n* ***Blog***: [A new era of Kubernetes integrations on GitLab.com](/blog/gitlab-kubernetes-agent-on-gitlab-com/)\n* ***Webcast***: [GitLab and HashiCorp - A holistic guide to GitOps and the Cloud Operating Model](/webcast/gitlab-hashicorp-gitops/)\n* ***Testimonial***: [Shaping a financial service’s cloud strategy using GitLab and Terraform](https://www.youtube.com/watch?v=2LF3eOoGV_o&list=PLFGfElNsQthb4FD4y1UyEzi2ktSeIzLxj&index=6)\n\nCover image by [Rodolfo Cuadros](https://unsplash.com/@rocua18) on [Unsplash](https://unsplash.com/photos/JKzgp6vhJ8M)\n{: 
.note}\n",[558,1062,859,9],{"slug":1657,"featured":6,"template":734},"gitops-done-3-ways","content:en-us:blog:gitops-done-3-ways.yml","Gitops Done 3 Ways","en-us/blog/gitops-done-3-ways.yml","en-us/blog/gitops-done-3-ways",{"_path":1663,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1664,"content":1669,"config":1675,"_id":1677,"_type":14,"title":1678,"_source":16,"_file":1679,"_stem":1680,"_extension":19},"/en-us/blog/google-cloud-next-anthos-kubernetes",{"title":1665,"description":1666,"ogTitle":1665,"ogDescription":1666,"noIndex":6,"ogImage":1283,"ogUrl":1667,"ogSiteName":720,"ogType":721,"canonicalUrls":1667,"schema":1668},"Google Cloud Next: Doubling down on Kubernetes and multi-cloud","Everything you need to know from last week’s big event.","https://about.gitlab.com/blog/google-cloud-next-anthos-kubernetes","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Google Cloud Next: Doubling down on Kubernetes and multi-cloud\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Melissa Smolensky\"}],\n        \"datePublished\": \"2019-04-16\",\n      }",{"title":1665,"description":1666,"authors":1670,"heroImage":1283,"date":1672,"body":1673,"category":300,"tags":1674},[1671],"Melissa Smolensky","2019-04-16","\nLast week at Google Next we saw Google bet big on Kubernetes. Google announced Anthos,\na multi-cloud platform based on Kubernetes, as well as Cloud Run, Google Cloud’s commercial Knative offering.\nThe key technology at the center of these two big announcements is Kubernetes.\nAs [Janakiram MSV](https://twitter.com/janakiramm) stated in a [Forbes article](https://www.forbes.com/sites/janakirammsv/2019/04/14/everything-you-want-to-know-about-anthos-googles-hybrid-and-multi-cloud-platform/#68ffc6d05b66) in regards to Anthos,\n\n> The core theme of Anthos is application modernization. 
Google envisages a future where all enterprise applications will run on Kubernetes.\n\nAnd in his [New Stack article](https://thenewstack.io/how-google-cloud-run-combines-serverless-with-containers/) about Cloud Run,\n\n> Like the way it offered a managed Kubernetes service before any other provider, Google moved fast in exposing Knative through Cloud Run to developers.\n\nFor a quick overview of the news at Google Next, [Brandon Jung](https://twitter.com/brandoncjung),\nVP of Alliances at GitLab, gives a quick recap of the news and how it impacts GitLab. Take a look.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/teRaXAPbfoA\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nLaunched by Google in 2014 at the first DockerCon, Kubernetes has become the de facto standard\nfor container orchestration. This May, 12,000 people will gather at KubeCon Barcelona to\nlearn how to implement and use Kubernetes to drive forward cloud native application development within their organizations.\n\nHere at GitLab we embraced Kubernetes early on as well, and we are continuing to take our\ndedication further, putting the power of Kubernetes in the developer workflow.\nEven the CNCF uses GitLab to provide cross-project\ncontinuous integration and interoperability testing.\n\n## Kubernetes throughout every step of the software development lifecycle\n\n“By allowing people to quickly connect Kubernetes clusters to their projects we are helping many\nenterprises embrace the cloud native way of building applications,” says Sid Sijbrandij, CEO at GitLab.\n“By providing a single application we allow enterprise developer and operations teams to embrace\nKubernetes every step of the way in their software development process.\nWe’ve seen a large financial institution go from a single build every two weeks to over 1,000\nself-served builds a day using GitLab. 
It is wonderful to see the scale we can unlock for organizations\nby providing access to Kubernetes in the developer workflow.”\n\n## GitLab plus Kubernetes\n\nIf you are looking to get started using [Kubernetes with GitLab](/solutions/kubernetes/),\nyou can easily connect any existing Kubernetes cluster on any platform to GitLab by using\nGitLab’s native Kubernetes integration. GitLab even makes it easy to set up and configure new\nclusters with just a few clicks using the Google Kubernetes Engine (GKE) integration.\nOnce connected, teams can install managed applications like Helm Tiller, Ingress,\nand Prometheus to their cluster with a single click in the GitLab interface.\nConnected clusters are available as a deploy target from GitLab CI/CD and are monitored\nusing GitLab’s bundled Prometheus capabilities.\n\nWe love seeing the community embrace GitLab and Kubernetes.\n\n\u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">getting back to grips with \u003Ca href=\"https://twitter.com/hashtag/GitLab?src=hash&amp;ref_src=twsrc%5Etfw\">#GitLab\u003C/a> CICD with \u003Ca href=\"https://twitter.com/hashtag/Terraform?src=hash&amp;ref_src=twsrc%5Etfw\">#Terraform\u003C/a> jobs and knocked up a \u003Ca href=\"https://twitter.com/hashtag/Kubernetes?src=hash&amp;ref_src=twsrc%5Etfw\">#Kubernetes\u003C/a> cluster for the runner! 
\u003Ca href=\"https://twitter.com/hashtag/devops?src=hash&amp;ref_src=twsrc%5Etfw\">#devops\u003C/a> \u003Ca href=\"https://twitter.com/hashtag/devoops?src=hash&amp;ref_src=twsrc%5Etfw\">#devoops\u003C/a> \u003Ca href=\"https://twitter.com/hashtag/nomorejenkins?src=hash&amp;ref_src=twsrc%5Etfw\">#nomorejenkins\u003C/a> \u003Ca href=\"https://twitter.com/hashtag/SRE?src=hash&amp;ref_src=twsrc%5Etfw\">#SRE\u003C/a> \u003Ca href=\"https://twitter.com/hashtag/GCP?src=hash&amp;ref_src=twsrc%5Etfw\">#GCP\u003C/a>\u003C/p>&mdash; Ferris Hall (@Ferrish07) \u003Ca href=\"https://twitter.com/Ferrish07/status/1106252265218703360?ref_src=twsrc%5Etfw\">March 14, 2019\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"en\" dir=\"ltr\">I&#39;ve just posted a little experience report. I&#39;m now using \u003Ca href=\"https://twitter.com/hashtag/Kubernetes?src=hash&amp;ref_src=twsrc%5Etfw\">#Kubernetes\u003C/a>  to spread my build load, thanks to \u003Ca href=\"https://twitter.com/gitlab?ref_src=twsrc%5Etfw\">@gitlab\u003C/a> and \u003Ca href=\"https://twitter.com/GCPcloud?ref_src=twsrc%5Etfw\">@GCPcloud\u003C/a>. 
\u003Ca href=\"https://t.co/KGQ9kyEEP5\">https://t.co/KGQ9kyEEP5\u003C/a>\u003C/p>&mdash; Paul Hicks (@tenwit) \u003Ca href=\"https://twitter.com/tenwit/status/1104828372197113856?ref_src=twsrc%5Etfw\">March 10, 2019\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003Cblockquote class=\"twitter-tweet\" data-lang=\"en\">\u003Cp lang=\"pl\" dir=\"ltr\">GitLab CI/CD &amp;&amp; Kubernetes by Bruno Fonseca \u003Ca href=\"https://t.co/ZDymOsbKfc\">https://t.co/ZDymOsbKfc\u003C/a>\u003C/p>&mdash; Paulo George Bezerra (@paulobezerr) \u003Ca href=\"https://twitter.com/paulobezerr/status/1108049894877659136?ref_src=twsrc%5Etfw\">March 19, 2019\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\nCover image by [Cody Schroeder](https://unsplash.com/@codyrs) on [Unsplash](https://unsplash.com/photos/L99UKlcUBJY?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[9,278,1229,1147,859],{"slug":1676,"featured":6,"template":734},"google-cloud-next-anthos-kubernetes","content:en-us:blog:google-cloud-next-anthos-kubernetes.yml","Google Cloud Next Anthos Kubernetes","en-us/blog/google-cloud-next-anthos-kubernetes.yml","en-us/blog/google-cloud-next-anthos-kubernetes",{"_path":1682,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1683,"content":1689,"config":1695,"_id":1697,"_type":14,"title":1684,"_source":16,"_file":1698,"_stem":1699,"_extension":19},"/en-us/blog/google-next-2018-recap",{"title":1684,"description":1685,"ogTitle":1684,"ogDescription":1685,"noIndex":6,"ogImage":1686,"ogUrl":1687,"ogSiteName":720,"ogType":721,"canonicalUrls":1687,"schema":1688},"Google Next 2018 Recap","Several GitLab team-members participated in Google Next in San Francisco. 
Here’s a recap of what went on.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679821/Blog/Hero%20Images/melody-meckfessel-gitlab-google-next-keynote.png","https://about.gitlab.com/blog/google-next-2018-recap","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Google Next 2018 Recap\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"William Chia\"}],\n        \"datePublished\": \"2018-07-27\",\n      }",{"title":1684,"description":1685,"authors":1690,"heroImage":1686,"date":1692,"body":1693,"category":300,"tags":1694},[1691],"William Chia","2018-07-27","\n\n## Google Partner Award Winner for Innovative Solution in Developer Ecosystem\n\nGoogle's Partner Summit kicked off a day before the broader Next conference started. At the summit, we were honored to receive the Google Cloud Partner Award for Innovative Solution in Developer Ecosystem for the [tight integration with GKE](/partners/technology-partners/google-cloud-platform/) we released earlier this year. 
Of course, we decided to take some fun photos with the cloud logo.\n\n![Sid Sijbrandij and Google execs](https://about.gitlab.com/images/blogimages/google-next-2018/sid-sijbrandij-google-execs.jpg){: .shadow.large.center}\n\n![Sid Sijbrandij and Google tech partner team](https://about.gitlab.com/images/blogimages/google-next-2018/sid-sijbrandij-google-tech-partner-team.jpg){: .shadow.large.center}\n\n![Eliran Mesika with GitLab's award + GitLab team with award](https://about.gitlab.com/images/blogimages/google-next-2018/eliran-mesika-gitlab-google-award-team.jpg){: .large.center}\n\n## Launch partner for GCP Marketplace with Kubernetes Apps\n\n![GCP Marketplace launch partners at Google Next](https://about.gitlab.com/images/blogimages/google-next-2018/gcp-marketplace-launch-partners-google-next.jpg){: .shadow.medium.center}\n\nWhile the GCP Marketplace announcement went out a few days before the show, there was still [a lot of buzz about it at Google Next](https://www.youtube.com/watch?v=C6koWw0r07Y&amp=&t=28m29s). In addition to traditional apps, which deploy VMs on Compute Engine, the new GCP Marketplace now supports Kubernetes apps, which deploy to a Kubernetes cluster running on Google Kubernetes Engine. We were happy to be a launch partner, offering the ability to [install GitLab via the GCP Marketplace](/blog/install-gitlab-one-click-gcp-marketplace/) on day one.\n\n## Serverless, Knative, and Istio\n\n[Knative](https://cloud.google.com/knative/) and [Istio](https://istio.io/) are two new projects announced during the show that we're excited about. Knative enables \"serverless\" workloads on Kubernetes while Istio is a service mesh for microservices. 
Check out [Josh](/company/team/#joshlambert) chatting live with [Sid](/company/team/#sytses) from the show (where Wi-Fi was a bit choppy) about serverless, Knative, and Istio, and how these technologies can potentially tie in with GitLab.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/k1jK4F4NoBw\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Google Cloud Build + GitLab CI/CD\n\nOne of the key announcements from the show was the introduction of Google Cloud Build, a CI/CD tool for GCP. Many folks asked us if we saw this as competitive to GitLab CI/CD, and how that would affect our partnership with Google. First and foremost, GitLab supports a multi-cloud strategy. We partner with all of the major cloud vendors to ensure GitLab CI/CD can support multi-cloud deployments. Many cloud vendors have their own CI/CD tooling, like AWS Code Deploy or IBM Cloud Pipelines. For us, Cloud Build is just another point of collaboration. In fact, our own [Josh Lambert](/company/team/#joshlambert) teamed up with [Christopher Sanson](https://www.linkedin.com/in/christophersanson/) to create a GitLab + Google demo for Christopher's session, \"CI/CD for Hybrid and Multi-Cloud Customers.\"\n\n![Christopher Sanson demos GitLab CI/CD with Cloud Build](https://about.gitlab.com/images/blogimages/google-next-2018/christopher-sanson-gitlab-cicd.jpg){: .shadow.medium.center}\n\nFirst, Christopher showed how to use GitLab as your code repo with Cloud Build as your CI/CD connected up via webhooks to Cloud Functions. Here's a link to some [sample code for setting up a Cloud Function to trigger cloud build from GitLab](https://gitlab.com/joshlambert/cloud-function-trigger) if you'd like to try it out yourself.\n\nThen Christopher showed how to use GitLab CI/CD and GitLab container registry while offloading the infrastructure build to Google Cloud Build. 
Using Google Cloud Build together with GitLab CI/CD is one way to overcome some of the security problems of docker-in-docker (e.g. requires privileged containers). Check out the video below to see it in action. Additionally, here's an example ruby app with a [sample configuration for connecting Gitlab CI/CD to Cloud Build](https://gitlab.com/joshlambert/minimal-ruby-app/merge_requests/1/diffs).\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/IUKCbq1WNWc?start=1324\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\n## GitLab.com is migrating to GCP\n\n![Melody Meckfessel talks GitLab GCP migration during keynote](https://about.gitlab.com/images/blogimages/google-next-2018/melody-meckfessel-gitlab-google-next-keynote.png){: .shadow.medium.center}\n\n>\"Our friends at GitLab have created a complete open source DevOps stack\" - [Melody Meckfessel](https://www.linkedin.com/in/melodymeckfessel/), Vice President of Engineering, Google Cloud Platform\n\nAs part of our plans to make GitLab.com a rock solid, enterprise-ready SaaS offering, we are migrating from Azure to Google Cloud Platform. We’ve been carefully planning this migration for many months and are now very close to executing with a target migration date of August 11. Melody Meckfessel talked a bit about our migration during her keynote on Thursday. 
Check out our previous blog post to read up on the [full details of GitLab’s GCP migration](/blog/gcp-move-update/).\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/JQPOPV_VH5w?start=1363\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Talking to you\n\n![William, Mike, and Reb in the GitLab booth](https://about.gitlab.com/images/blogimages/google-next-2018/william-chia-mike-walsh-gitlab-booth-duo.jpg){: .shadow.large.center}\n\nOf course one of our favorite parts of any trade show is getting to meet our users and customers face to face. We love hearing the palpable excitement when you talk about how GitLab is streamlining your toolchain or easing your move to Kubernetes. We love sharing the story with folks who don’t know yet and seeing their faces light up when we tell them GitLab’s not just a version control solution, but an end-to-end DevOps application with built-in project planning, CI/CD, container registry, monitoring, and more. Google Next ’18 was a great show, and we can’t wait to see you next time! 
Check out the [full list of events](/events/) we’ll be at to find one close to you.\n",[278,1147,9,1229,859],{"slug":1696,"featured":6,"template":734},"google-next-2018-recap","content:en-us:blog:google-next-2018-recap.yml","en-us/blog/google-next-2018-recap.yml","en-us/blog/google-next-2018-recap",{"_path":1701,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1702,"content":1708,"config":1714,"_id":1716,"_type":14,"title":1717,"_source":16,"_file":1718,"_stem":1719,"_extension":19},"/en-us/blog/google-next-2018-security-track-recap",{"title":1703,"description":1704,"ogTitle":1703,"ogDescription":1704,"noIndex":6,"ogImage":1705,"ogUrl":1706,"ogSiteName":720,"ogType":721,"canonicalUrls":1706,"schema":1707},"Google Next 2018 security track recap","Here's how one GitLab team-member made the most of the security track at Google Next 2018.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678940/Blog/Hero%20Images/securitygooglenext.jpg","https://about.gitlab.com/blog/google-next-2018-security-track-recap","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Google Next 2018 security track recap\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jim Thavisouk\"}],\n        \"datePublished\": \"2018-08-10\",\n      }",{"title":1703,"description":1704,"authors":1709,"heroImage":1705,"date":1711,"body":1712,"category":300,"tags":1713},[1710],"Jim Thavisouk","2018-08-10","\nEvery time someone asks me how I like working at GitLab, I say, \"I love it here!\"\nWith our [company culture](https://handbook.gitlab.com/handbook/values/), 100 percent [remote workforce](https://handbook.gitlab.com/handbook/company/culture/all-remote/), and [growing team](/jobs/), it's a pleasure\nto work with such a high energy team.\nThe [security department](https://handbook.gitlab.com/handbook/security/#security-department)\nis continually growing -- very fast! 
We each have our own specialties and bring a diverse selection\nof strong experiences, while working very well together. In my position, I have\nbeen focusing very heavily on policy as code to raise the bar in security here at GitLab. This blog post was inspired by [William Chia](/company/team/#thewilliamchia)'s\n[Google Next 2018 recap](/blog/google-next-2018-recap/). If you haven't read it, I highly recommend it!\n\n## Security highlights of Google Next 2018\n\n### Forseti\n\nI was excited coming into this conference for [Forseti](https://forsetisecurity.org/),\nespecially with the announcement of\n[Forseti 2.0](https://forsetisecurity.org/news/2018/06/11/forseti-2.0-launch.html).\nWe had a [Forseti Hack Day](https://groups.google.com/a/forsetisecurity.org/forum/#!topic/announce/bHy8QCK_AY0)\nthat kicked off a day before the actual conference, which allowed me to interact\nwith Google engineers, product managers, and Forseti customers. For\nanyone who missed Forseti's session from [Chris Law](https://www.linkedin.com/in/chrislaw/),\n[Michael Capicotto](https://www.linkedin.com/in/mcapicotto/), and\n[Marten Van Wezel](https://www.linkedin.com/in/martenvanwezel/), you can check it out\n[the recording](https://www.youtube.com/watch?v=4TrlgbV_VlQ). See [the details for joining the discussion here](https://groups.google.com/a/forsetisecurity.org/forum/#!topic/announce/8OSAB7UEzSY).\n\n### Istio\n\n[\"Istio is platform-independent and designed to run in a variety of environments,\nincluding those spanning Cloud, on-premise, Kubernetes, Mesos, and more.\"](https://istio.io/docs/concepts/what-is-istio/)\nI'm excited to see Istio 1.0, which was just released a few days ago! See [the team's talk](https://youtu.be/eOI2aM9P7-c)\nfrom [Tao Li](https://www.linkedin.com/in/tao-li-1a447935/) and\n[Samrat Ray](https://www.linkedin.com/in/samratray/).\n\n### Best practices\n\nEveryone can use best practices. 
At Forseti Hack Day, I met [Tom Salmon](https://www.linkedin.com/in/tomcsalmon/)\nwho has vast experience in security. In his [talk](https://www.youtube.com/watch?v=ZQHoC0cR6Qw),\nhe provides a great knowledge base and reference point to best security practices in GCP.\n\n### Sessions are now live\n\nThese were only a few sessions at Google Next, and there are hundreds of others\nto check out. You can find them neatly categorized on\n[YouTube](https://www.youtube.com/channel/UCTMRxtyHoE3LPcrl-kT4AQQ/playlists?flow=grid&view=50&shelf_id=8).\n\n## We'd love to hear your feedback\n\nWe'd love to hear from you on how you use any of these products in your environment.\nOur team is currently working very closely with the Forseti team, and I'm sure they\nwould love to have you join in on the discussion as well. Don't hesitate to\nreach out directly to me by email (jthavisouk@gitlab.com) or join any of these groups to keep a dialogue going\nabout any of these products. We can only help each other in the process.\n",[278,1147,9,1229,859,979],{"slug":1715,"featured":6,"template":734},"google-next-2018-security-track-recap","content:en-us:blog:google-next-2018-security-track-recap.yml","Google Next 2018 Security Track Recap","en-us/blog/google-next-2018-security-track-recap.yml","en-us/blog/google-next-2018-security-track-recap",{"_path":1721,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1722,"content":1727,"config":1733,"_id":1735,"_type":14,"title":1736,"_source":16,"_file":1737,"_stem":1738,"_extension":19},"/en-us/blog/how-gitlab-can-help-you-secure-your-cloud-native-applications",{"title":1723,"description":1724,"ogTitle":1723,"ogDescription":1724,"noIndex":6,"ogImage":1115,"ogUrl":1725,"ogSiteName":720,"ogType":721,"canonicalUrls":1725,"schema":1726},"How GitLab improves cloud native application security and protection","In this article, we will show you how GitLab can help you streamline your cloud native application security from a code and operations point of 
view by providing you with real-world examples.","https://about.gitlab.com/blog/how-gitlab-can-help-you-secure-your-cloud-native-applications","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab improves cloud native application security and protection\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Nico Meisenzahl\"}],\n        \"datePublished\": \"2020-08-18\",\n      }",{"title":1723,"description":1724,"authors":1728,"heroImage":1115,"date":1730,"body":1731,"category":576,"tags":1732},[1729],"Nico Meisenzahl","2020-08-18","\n{::options parse_block_html=\"true\" /}\n\nIn the [cloud-native](/topics/cloud-native/) ecosystem, decisions and changes are made on a rapid basis. Applications get adapted and deployed multiple times a week or even day. Microservices get developed decentralized with different peoples and teams involved. In such an environment, it is crucial to ensure that applications are developed and operated safely. This can be done by shifting security left into the developer lifecycle but also by using DevSecOps to empower operations with enhanced monitoring and protection for the application runtime.\n\nIn this article, I would like to show you how GitLab can help you streamline your application security from a code and operations point of view by providing you with real-world examples. Before we deep dive into the example, let me first introduce you to the [GitLab Secure](https://about.gitlab.com/stages-devops-lifecycle/secure/) and [GitLab Protect](https://about.gitlab.com/stages-devops-lifecycle/govern/) product portfolio which are the foundation for this. GitLab Secure helps developers to enable accurate, automated, and continuous assessment of their applications by proactively identifying vulnerabilities and weaknesses and therefore minimizing security risk. 
GitLab Protect, on the other hand, supports operations by proactively protecting environments and cloud-native applications by providing context-aware technologies to reduce overall security risk. Both are backed by leading open-source projects that have been fully integrated into developer and operation processes and the GitLab user interface (UI).\n\n## Cloud Native Application Security: The attack\n\nLet’s assume we have an application hosting a web interface that allows a user to provide some input. The application is written in [Golang](https://golang.org/) and executes the input as part of an external operating system command ([os/exec](https://golang.org/pkg/os/exec/)). The application does not contain any validation or security features to validate the input, which allows us to inject additional commands that are also executed in the application environment.\n\nThe application is running as containerized microservices in a Kubernetes cluster. The Kubernetes Cluster is shared across multiple teams and projects, allowing us to inject and read data in another application running next to ours. In our example, we will connect an unsecured Redis instance in a different Namespace and read/write data.\n\nNow let us take a closer look at how GitLab can help us detect the attack, permit its execution, and finally help us find and fix the root cause in our code.\n\n## Container Host Security\n\n[Container Host Security](/stages-devops-lifecycle/govern/) helps us to detect an attack in real-time by monitoring the pod for any unusual activity. It can then alert operations with detailed information on the attack itself.\n\nContainer Host Security is powered by [Falco](https://falco.org/), an open-source runtime security tool that listens to the Linux kernel using eBPF. Falco parses system calls and asserts the stream against a configurable rules engine in real-time. 
The Falco deployment used by Container Host Security can be deployed and fully managed using [GitLab Managed Apps](https://docs.gitlab.com/ee/update/removals.html).\n\nIn our example, Falco detects the injected redis-cli command, which is used to read/write data into the unsecured Redis instance.\n\n![Container Host Security](https://about.gitlab.com/images/blogimages/2020-08-18-How-GitLab-Can-Help-You-Secure-Your-Cloud-Native-Applications/falco.png)\n\nFalco can now alert operations who can use those valuable insights to define and execute further steps.\n\n## Container Network Security\n\nA first step to permit access to the unsecured Redis instance would be to permit traffic between the application in our Kubernetes cluster. This can be done by using [Container Network Security](/stages-devops-lifecycle/govern/). Container Network Security is again fully managed by [GitLab Managed Apps](https://docs.gitlab.com/ee/update/removals.html) and can also be configured within the GitLab project user interface.\n\nContainer Network Security is powered by [Cilium](https://cilium.io/), an open-source networking plugin for Kubernetes that can be used to implement support for NetworkPolicy resources. [Network Policies](https://kubernetes.io/docs/concepts/services-networking/network-policies/) can be used to detect and block unauthorized network traffic between pods and to/from the Internet.\n\nImplementing Network Policies for our application will block the underlying network traffic generated by the attack. The policies can be enabled within the GitLab project UI:\n\n![Network Policies](https://about.gitlab.com/images/blogimages/2020-08-18-How-GitLab-Can-Help-You-Secure-Your-Cloud-Native-Applications/network-polices.png)\n\n## Web Application Firewall\n\nWith Container Network Security in place, our attack isn’t able to talk to the Redis instance anymore, but it is still possible to execute other network unrelated attacks using the command injection. 
[Web Application Firewall (WAF)](/stages-devops-lifecycle/govern/) can now help us to increase the security and detect and block the attack at the [Kubernetes Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) level.\n\nThe Web Application firewall is also powered by open-source. It is based on the [ModSecurity](https://kubernetes.github.io/ingress-nginx/user-guide/third-party-addons/modsecurity/) module, a toolkit for real-time web application monitoring, logging, and access control. It is preconfigured to use the [OWASP’s Core Rule Set](https://www.modsecurity.org/CRS/Documentation/), which provides generic attack detection capabilities. Like the other integrations, Web Application Firewall is also fully managed by GitLab using [GitLab Managed Apps](https://docs.gitlab.com/ee/update/removals.html).\n\nIn our example, the Web Application Firewall detects the attack and is also able to block it:\n\n![Web Application Firewall logs](https://about.gitlab.com/images/blogimages/2020-08-18-How-GitLab-Can-Help-You-Secure-Your-Cloud-Native-Applications/waf-log.png)\n\nBlocking the attack at the Ingress level will help us to deny the traffic before it hits our application. 
To do so, we can enable the Web Application Firewall blocking mode directly from the GitLab UI:\n\n![WAF settings](https://about.gitlab.com/images/blogimages/2020-08-18-How-GitLab-Can-Help-You-Secure-Your-Cloud-Native-Applications/waf-settings.png)\n\nIn addition to Container Host Security, we could have used the Web Application Firewall to detect the attack using the Thread Monitoring dashboard within our GitLab project:\n\n![Thread Monitoring](https://about.gitlab.com/images/blogimages/2020-08-18-How-GitLab-Can-Help-You-Secure-Your-Cloud-Native-Applications/thread-monitoring.png)\n\nThe Thread Monitoring dashboard also provides us with useful insights and metrics of our enforced Container Network Policy.\n\n## Static Application Security Testing\n\nWe have now successfully protected our application runtime and ensured that no additional attacks can be executed. But we should also find and fix the root cause to ensure that such incidents are not recurring in the future. This is where [Static Application Security Testing (SAST)](/stages-devops-lifecycle/secure/) can help us. Static Application Security Testing can be easily integrated into our project using [GitLab CI/CD](https://docs.gitlab.com/ee/ci/) and then allows us to analyze our [source code](/solutions/source-code-management/) for known vulnerabilities.\n\nIn our case (a Golang application) the code scanning is executed using the open-source project [Golang Security Checker](https://github.com/securego/gosec). 
The results are displayed in the Security dashboard of our GitLab project for easy access:\n\n![Security Dashboard](https://about.gitlab.com/images/blogimages/2020-08-18-How-GitLab-Can-Help-You-Secure-Your-Cloud-Native-Applications/sec-dashboard.png)\n\nIn our example, the code scan has identified the root cause and provides us with detailed information about the vulnerability, the line of code that needs to be fixed, and the ability to easily create an issue to fix it.\n\n![SAST](https://about.gitlab.com/images/blogimages/2020-08-18-How-GitLab-Can-Help-You-Secure-Your-Cloud-Native-Applications/sast.png)\n\nFinally, of course, we should also talk to the team running the other application to make sure that their Redis instance gets secured too. We should also verify how the other [GitLab Secure](https://about.gitlab.com/stages-devops-lifecycle/secure/) features can help to further improve the overall security of the application.\n\n## GitLab Protect and Secure in action\n\nIf you like to get more insights on GitLab Secure and Protect and want to see it in action, you are welcome to join [Wayne](https://gitlab.com/whaber), [Philippe](https://gitlab.com/plafoucriere) and myself in our session [“Your Attackers Won't Be Happy! How GitLab Can Help You Secure Your Cloud-Native Applications!”](https://gitlabcommitvirtual2020.sched.com/event/dUWw/your-attackers-wont-be-happy-how-gitlab-can-help-you-secure-your-cloud-native-applications) at GitLab Commit where you can gain further insights on Container Host Security, Container Network Security, Web Application Firewall (WAF), and Status Application Security Testing (SAST).\n\nRegister today and join me and others at [GitLab Commit](https://about.gitlab.com/events/) on August 26. 
GitLab Commit 2020 is a free 24-hour virtual experience filled with practical DevOps strategies shared by leaders in development, operations, and security.\n",[9,563,859,731,979],{"slug":1734,"featured":6,"template":734},"how-gitlab-can-help-you-secure-your-cloud-native-applications","content:en-us:blog:how-gitlab-can-help-you-secure-your-cloud-native-applications.yml","How Gitlab Can Help You Secure Your Cloud Native Applications","en-us/blog/how-gitlab-can-help-you-secure-your-cloud-native-applications.yml","en-us/blog/how-gitlab-can-help-you-secure-your-cloud-native-applications",{"_path":1740,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1741,"content":1747,"config":1753,"_id":1755,"_type":14,"title":1756,"_source":16,"_file":1757,"_stem":1758,"_extension":19},"/en-us/blog/how-secret-detection-can-proactively-revoke-leaked-credentials",{"title":1742,"description":1743,"ogTitle":1742,"ogDescription":1743,"noIndex":6,"ogImage":1744,"ogUrl":1745,"ogSiteName":720,"ogType":721,"canonicalUrls":1745,"schema":1746},"How Secret Detection can proactively revoke leaked credentials","GitLab extends Secret Detection capabilities to customers on Google Cloud.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664923/Blog/Hero%20Images/security-checklist.png","https://about.gitlab.com/blog/how-secret-detection-can-proactively-revoke-leaked-credentials","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How Secret Detection can proactively revoke leaked credentials\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Connor Gilbert\"}],\n        \"datePublished\": \"2023-06-13\",\n      }",{"title":1742,"description":1743,"authors":1748,"heroImage":1744,"date":1750,"body":1751,"category":979,"tags":1752},[1749],"Connor Gilbert","2023-06-13","\n\nModern applications don’t run on their own: They rely on databases, cloud services, APIs, and other services. 
To connect to those systems, the applications use credentials like private keys and API tokens. These credentials have to be kept secret – if they’re leaked, adversaries can abuse them to steal data, mine cryptocurrency, or disable important systems. Today, we’re increasing the level of protection we offer GitLab Ultimate users against this serious risk via an expansion of our partnership with Google Cloud.\n\n## How GitLab addresses this risk\n[GitLab Secret Detection](https://docs.gitlab.com/ee/user/application_security/secret_detection/) addresses the risk of leaked secrets by detecting when keys, tokens, and other sensitive values are exposed in code and helping DevSecOps teams respond. It’s imperative to respond quickly when credentials are leaked, especially for keys to cloud provider accounts, since adversaries can do a lot of damage quickly. \n\nWith our expanded partnership, we’ve integrated GitLab Secret Detection with Google Cloud to better protect customers who use GitLab to develop applications on Google Cloud. Now, if an organization leaks a Google Cloud credential to a public project on GitLab.com, GitLab can automatically protect the organization by working with Google Cloud to protect the account. This protection is available in GitLab Ultimate.\n\n## GitLab’s investment in automated response\nGitLab has added support for multiple cloud platforms with [automatic response to leaked secrets](https://docs.gitlab.com/ee/user/application_security/secret_detection/automatic_response.html), including the [automatic revocation of GitLab Personal Access Tokens (PATs)](https://about.gitlab.com/blog/pat-revocation-coming-soon/). 
We’re working on more integrations now, and are always looking for more cloud service vendors seeking similar protection to join [our partner program](https://docs.gitlab.com/ee/user/application_security/secret_detection/automatic_response.html#partner-program-for-leaked-credential-notifications).\n\nWe’ve also [recently expanded](https://about.gitlab.com/releases/2023/04/22/gitlab-15-11-released/#automatic-response-to-leaked-secrets-on-any-public-branch) the places automatic responses are triggered. Secret Detection users are now protected from credential leaks as soon as they appear in any public branch on GitLab.com.\n\n## Why we’re investing here\nSecurity is better when it’s integrated throughout the software development lifecycle. GitLab’s [2023 Security Without Sacrifices report](https://about.gitlab.com/press/releases/2023-04-20-gitlab-seventh-devsecops-report-security-without-sacrifices.html) found that security is one of the top benefits of a DevSecOps platform. GitLab’s DevSecOps platform enhances secure software development by helping developers and security professionals collaborate to prevent business-critical vulnerabilities. Now, in collaboration with Google Cloud, we’re adding an additional layer of protection for our mutual customers.\n\n## Better protection for GitLab/Google Cloud customers\nGoogle Cloud users on GitLab.com are now better protected. The new integration protects projects that:\n\n* are public. Private projects are unaffected by this change.\n* are hosted on GitLab.com. Projects on GitLab Dedicated or self-managed instances are unaffected.\n* use Secret Detection. If you haven't enabled Secret Detection for a project, we currently won't search it for secrets to revoke.\n\nSecret Detection searches for three types of secrets issued by Google Cloud:\n\n1. [Service account keys](https://cloud.google.com/iam/docs/best-practices-for-managing-service-account-keys)\n2. [API keys](https://cloud.google.com/docs/authentication/api-keys)\n3. 
[OAuth client secrets](https://support.google.com/cloud/answer/6158849#rotate-client-secret)\n\nPublicly leaked secrets are sent to Google Cloud after they’re discovered. Google Cloud verifies the leaks, then works to protect customer accounts against abuse.\n\n## How the Google Cloud integration works\nOur Google Cloud integration is on by default for projects that use GitLab Secret Detection on GitLab.com. Secret Detection scanning is available in all GitLab tiers, but an automatic response to leaked secrets is currently [only available in Ultimate projects](https://docs.gitlab.com/ee/user/application_security/secret_detection/automatic_response.html#feature-availability).\n\n* To protect a project, [enable GitLab Secret Detection](https://docs.gitlab.com/ee/user/application_security/secret_detection/#enable-secret-detection).\n* To protect your entire organization, consider [enforcing scan execution](https://docs.gitlab.com/ee/user/application_security/index.html#enforce-scan-execution) to run Secret Detection in all of your projects.\n\n## What’s next\n\nWe’re excited to improve Secret Detection with this integration, but we aren’t stopping here. Check our [strategy and plans](https://about.gitlab.com/direction/secure/static-analysis/secret-detection/#strategy-and-themes) to learn more about where we’re headed.\n\n_GitLab can help secure your applications, whether they run on Google Cloud or elsewhere. 
Learn more about our [security and governance solutions](https://about.gitlab.com/solutions/application-security-testing/)._\n",[979,9,283],{"slug":1754,"featured":6,"template":734},"how-secret-detection-can-proactively-revoke-leaked-credentials","content:en-us:blog:how-secret-detection-can-proactively-revoke-leaked-credentials.yml","How Secret Detection Can Proactively Revoke Leaked Credentials","en-us/blog/how-secret-detection-can-proactively-revoke-leaked-credentials.yml","en-us/blog/how-secret-detection-can-proactively-revoke-leaked-credentials",{"_path":1760,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1761,"content":1767,"config":1772,"_id":1774,"_type":14,"title":1775,"_source":16,"_file":1776,"_stem":1777,"_extension":19},"/en-us/blog/how-to-secure-cloud-run-deployment-with-auto-devops",{"title":1762,"description":1763,"ogTitle":1762,"ogDescription":1763,"noIndex":6,"ogImage":1764,"ogUrl":1765,"ogSiteName":720,"ogType":721,"canonicalUrls":1765,"schema":1766},"How to secure Google Cloud Run deployment with GitLab Auto DevOps","This tutorial will help teams speed development, improve security, and harness the power of serverless technology.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682898/Blog/Hero%20Images/cloud-security.png","https://about.gitlab.com/blog/how-to-secure-cloud-run-deployment-with-auto-devops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to secure Google Cloud Run deployment with GitLab Auto DevOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Regnard Raquedan\"}],\n        \"datePublished\": \"2023-08-21\",\n      }",{"title":1762,"description":1763,"authors":1768,"heroImage":1764,"date":1769,"body":1770,"category":752,"tags":1771},[1268],"2023-08-21","\nTeams looking for efficiency often look to GitLab and serverless platforms to minimize management overhead and speed deployment times. 
GitLab's tight integration with [Google Cloud Run](https://cloud.google.com/run) means that teams can take advantage of the industry-leading DevSecOps platform to deliver container-based applications securely and efficiently.\n\nThis tutorial will show you how to deploy applications to Cloud Run using GitLab [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/), a feature that lets developers quickly use CI/CD pipelines via pre-built templates. This approach can accelerate testing and deployment because stages and jobs are already pre-configured.\n\n## Prerequisites\nBefore you begin, make sure you have the following:\n- a Google Cloud project with Cloud Run and Cloud Build APIs enabled\n- a Google Cloud service account with Cloud Run Admin, Cloud Build Service Agent, Service Account User, and Project Viewer permissions\n- a GitLab project containing your application code\n\n### Demo walkthrough\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/hIFagDyo3f8\" title=\"YouTube video player\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share\" allowfullscreen>\u003C/iframe>\n\n\n**Step 1:** Configure Google Cloud credentials\n\nTo start, use the Google Cloud service account with the necessary permissions. 
Once you have the service account, export its key to a JSON file and encode it using base64.\n\n**Step 2:** Add Auto DevOps to your GitLab project\n\nNavigate to your GitLab project and create a new file at the root called \"gitlab-ci.yml.\" Add the following lines of code to include the Auto DevOps template, which automatically configures your pipeline based on project settings and configuration:\n\n```\ninclude:\n  - template: Auto-DevOps.gitlab-ci.yml\n```\n\nCommit the changes to your project.\n\n**Step 3:** Configure environment variables\n\nAdd the following environment variables to your GitLab project:\n\n* `BASE64_GOOGLE_CLOUD_CREDENTIALS`: The base64-encoded JSON file containing your service account key. Make sure to mask this variable.\n* `PROJECT_ID`: The Google Cloud project ID.\n* `SERVICE_ID`: The service ID that will be used for Cloud Run. For this tutorial, we'll use \"nodejs\" as our service ID.\n\n**Step 4:** Configure the CI/CD pipeline\n\nModify the \"gitlab-ci.yml\" file to add Google Cloud SDK, gcloud commands, Docker, and the necessary configurations for deploying your application to Cloud Run. \n\n```\nimage: google/cloud-sdk:latest\n```\n\nAdditionally, use Google Cloud Build to generate the container image required for deployment. 
Commit the changes to your project.\n\n```\ndeploy:\n  stage: deploy\n  script:\n    - export GOOGLE_CLOUD_CREDENTIALS=$(echo $BASE64_GOOGLE_CLOUD_CREDENTIALS | base64 -d)\n    - echo $GOOGLE_CLOUD_CREDENTIALS > service-account-key.json \n    - gcloud auth activate-service-account --key-file service-account-key.json \n    - gcloud config set project $PROJECT_ID \n    - gcloud auth configure-docker\n    - gcloud builds submit --tag gcr.io/$PROJECT_ID/$SERVICE_ID\n    - gcloud run deploy $SERVICE_ID --image gcr.io/$PROJECT_ID/$SERVICE_ID --region=us-central1 --platform managed --allow-unauthenticated \n```\n\n**Step 5:** Finalize the DAST stage\n\nOnce your application has been deployed to Cloud Run, complete the dynamic application security testing ([DAST](https://docs.gitlab.com/ee/user/application_security/dast/)) stage in the CI/CD pipeline to ensure your application is more secure. Add the Cloud Run URL to your \"gitlab-ci.yml\" file and enable full_scan and browser_scan options. Commit the changes to your project.\n\n```\nvariables:\n  DAST_WEBSITE: \u003Cproject URL>\n  DAST_FULL_SCAN_ENABLED: \"true\"\n  DAST_BROWSER_SCAN: \"true\" \n```\n\nIn this tutorial, we successfully deployed a Cloud Run application using GitLab's Auto DevOps. 
By following these steps, you can enjoy faster development and improved security, and harness the power of serverless technology.\n",[1146,9,979],{"slug":1773,"featured":6,"template":734},"how-to-secure-cloud-run-deployment-with-auto-devops","content:en-us:blog:how-to-secure-cloud-run-deployment-with-auto-devops.yml","How To Secure Cloud Run Deployment With Auto Devops","en-us/blog/how-to-secure-cloud-run-deployment-with-auto-devops.yml","en-us/blog/how-to-secure-cloud-run-deployment-with-auto-devops",{"_path":1779,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1780,"content":1786,"config":1793,"_id":1795,"_type":14,"title":1796,"_source":16,"_file":1797,"_stem":1798,"_extension":19},"/en-us/blog/how-we-optimized-our-infrastructure-spend-at-gitlab",{"title":1781,"description":1782,"ogTitle":1781,"ogDescription":1782,"noIndex":6,"ogImage":1783,"ogUrl":1784,"ogSiteName":720,"ogType":721,"canonicalUrls":1784,"schema":1785},"How we optimized infrastructure spend at GitLab","We keep our cloud spend under control with a spend optimization framework – now we're sharing it with you.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681701/Blog/Hero%20Images/piggy_bank.jpg","https://about.gitlab.com/blog/how-we-optimized-our-infrastructure-spend-at-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we optimized infrastructure spend at GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Davis Townsend\"}],\n        \"datePublished\": \"2020-10-27\",\n      }",{"title":1781,"description":1782,"authors":1787,"heroImage":1783,"date":1789,"body":1790,"category":752,"tags":1791},[1788],"Davis Townsend","2020-10-27","\n\nInfrastructure spend optimization is a hot topic these days as many established companies are migrating workloads to the cloud. 
Similarly, fast-growing startups are struggling to control their operating costs as they expand their cloud footprint to meet user demand.\n\nAt GitLab we have taken a methodical and data-driven approach to the problem so we can reduce our cloud spend and control our operating costs, while still creating great features for our customers. We designed a five-stage framework which emphasizes building awareness of our infrastructure spend to the point where any change in costs is well understood and no longer a surprise.\n\nOur framework is very similar to a normal data maturity framework (shown below) that would progress through descriptive, predictive, and finally prescriptive analytics, but we tailor it specifically for this domain. I'll explain each stage and what it looks like at GitLab so you can see how you might apply it to your own organization.\n\n![Normal Data Maturity Framework](https://about.gitlab.com/images/blogimages/2020-10-28-How-We-Optimized-Infra-spend/DMM.jpeg \"Normal Data Maturity Framework\"){: .medium.center}\nA normal data maturity framework\n{: .note.text-center}\n\n## Spend optimization framework\n\n## 1. Basic cost visibility\n This stage can be thought of as data exploration. You just want to understand as much as you can about where you are spending money at a high level. What vendors and services are you spending the most money on? This data is generally provided by cloud vendors through a billing console, as well as through billing exports. 
I've found the way to get the best use out of both options is to use the provided billing console for answering simple questions about specific costs quickly, and the exports for integrating this data into your own analytics architecture for more granular reporting, [multicloud](/topics/multicloud/) reporting, or for specific recurring reports you need over a longer time horizon.\n\n### GitLab example\nWhen starting out, we looked at Google Cloud Platform (GCP) and their [Default Billing Export](https://cloud.google.com/billing/docs/how-to/export-data-bigquery) to get an overview of which products/projects/SKUs were responsible for the majority of our spend.\n\n## 2. Cost allocation\nThis stage is all about going from high-level areas of spend to more granular dimensions that tie back to relevant business metrics in your company. At GitLab we may want to look at what we spend on particular services like CI runners, or what we spend to support employees using GitLab.com as part of their job vs. customer spend. This data may not be readily available to you so there could be a lot of work involved to tie these sorts of relevant business dimensions back to the cost reports provided by your vendor.\n\n### GitLab example\nFor our production architecture we had some [GCP labels](https://cloud.google.com/compute/docs/labeling-resources) that indicated the internal service applied to the majority of our instances, so we started with those to see which services we spent most of our money on. More recently, we have created a [handbook page for Infrastructure Standards](https://handbook.gitlab.com/handbook/company/infrastructure-standards/) around project creation and label naming so that we can get even more insight out of our bill.\n\n\n\n## 3. 
Optimize usage efficiency\nOnce you can allocate costs to their relevant business metrics, then can you start to ask interesting questions such as, “Why is our storage spend so high on feature x?” By asking these questions and then talking with the subject matter experts about these potential areas of optimization you can start to come up with ideas to reduce some of this cost.\n\n### GitLab example\nWhen we reached this stage we began to identify many areas of opportunity, including:\n\n- [CI runners](https://gitlab.com/gitlab-org/gitlab/-/issues/35777): One of the areas discovered from stage 2 happened to be our CI runners, for which we created more granular reporting to see the cost by specific repos, pipelines, and jobs, which allowed us to find some ways to optimize our own internal use of CI.\n- [Object storage](https://gitlab.com/gitlab-com/gl-infra/infrastructure/-/issues/10087): We discovered high storage costs for outdated Postgres backups. We resolved this by enabling bucket lifecycle policies and reduced our object storage for that bucket by 900TB.\n- [Network usage](https://gitlab.com/gitlab-com/gl-infra/infrastructure/-/issues/10222): By correlating a recent change in our spend profile to a network architecture change, we were able to highlight the need for additional changes. We ultimately implemented a change to directly download runner artifacts from GCS instead of having the traffic be proxied. This significantly reduced our overall networking cost.\n\n## 4. Measure business outcomes vs spend\n\nWhen you get to a point for a particular area where you feel like you have done all the basic optimizations and aren't sure where else you could reduce cost without seriously impacting your employees or customers, you have reached stage 4. 
This stage is all about analyzing the value of more complex changes that could reduce spend at the expense of something else, as well as considering the value and cost impact of major feature or architectural changes in the future.\n\n### GitLab example\nOur best example of this was our recent rollout of [advanced global search](https://docs.gitlab.com/ee/user/search/advanced_search.html) to all paid users on GitLab.com. In the first iterations of testing for this feature our costs were exceptionally high. Through a lot of hard work by the team responsible for the feature, they were able to significantly bring down the costs while improving functionality. Through those efforts, GitLab was able to bring this great feature to the platform in a way that also made sense from a business perspective.\n\n## 5. Predict future spend and problem areas\nOnce your company has matured the practices above, you can start to become proactive about observing cost. You can also begin to detect and alert when spend is outside expected thresholds. Once you get to this point, infrastructure optimization should become a boring topic, and when you no longer have any cases of huge unexpected cost increases that were not due to unexpected increases in customer demand, you know you are doing a great job.\n\n### GitLab example\n\nWe’re still working on this stage ourselves. While we’ve had some success in detecting unexpected spend, and even tying it to anomalous behavior in our platform, we recognize we have much more to do here. We are still working to get most of our usage to Stages 3-4, while spending parallel effort to reach Stage 5 for some more mature workloads.\n\n## Current state and next steps\nToday at GitLab, depending on the workload, we are anywhere between stages 1-4. The bulk of the work is going into getting everything to at least stage 2, and from there we can work on getting everything to stages 3-4. 
Current efforts include applying our newly created [infrastructure standards](https://handbook.gitlab.com/handbook/company/infrastructure-standards/) across all of our infrastructure, bringing in relevant product usage data from our various services, and giving PMs the tools they need to better manage the cost of their services through a single source of truth of base level cost metrics.\n\n## Workflow and planning\nCost optimization is a difficult topic to tackle effectively as it involves many different stakeholders across the business who all have their own priorities. The way we are taking this problem on at GitLab is we have an [issue board](https://gitlab.com/groups/gitlab-com/-/boards/1502173?label_name[]=infrafin) where we plan and track progress on issues related to infrastructure spend. For all the major initiatives we assign priority to these based on four factors:\n\n1.  Cost savings\n2.  Customer impact\n3.  Future potential cost impact\n4.  Effort required\nThese factors are discussed and reviewed by our analyst, our SaaS offering product manager, and the relevant subject matter expert for the area. Once the priority is agreed upon, the product manager works with various product teams to get these scheduled into milestones or backlog queues for the teams that need to implement the changes. Progress is tracked on the issue board, and reviewed for priority to ensure the solution moves forward at an appropriate velocity.\n\n## More to read\n\nAll of this info and more can be found in our [Cost Management Handbook](https://handbook.gitlab.com/handbook/engineering/infrastructure/cost-management/). 
We continue to improve this page to provide our own employees with the resources they need to understand this topic better, as well as providing external viewers some idea of how they could think about infrastructure optimization in their own company.\n\nYou might also enjoy:\n* [What we learned after a year of GitLab.com on Kubernetes](/blog/year-of-kubernetes/)\n* [How we migrated application servers from Unicorn to Puma](/blog/migrating-to-puma-on-gitlab/)\n* [How we upgraded PostgreSQL at GitLab.com](/blog/gitlab-pg-upgrade/)\n\nCover image by [Fabian Blank](https://unsplash.com/@blankerwahnsinn?utm_source=unsplash&amp;utm_medium=referral&amp;utm_content=creditCopyText) on [Unsplash](https://unsplash.com)\n{: .note}\n",[1147,1475,1792,9],"collaboration",{"slug":1794,"featured":6,"template":734},"how-we-optimized-our-infrastructure-spend-at-gitlab","content:en-us:blog:how-we-optimized-our-infrastructure-spend-at-gitlab.yml","How We Optimized Our Infrastructure Spend At Gitlab","en-us/blog/how-we-optimized-our-infrastructure-spend-at-gitlab.yml","en-us/blog/how-we-optimized-our-infrastructure-spend-at-gitlab",{"_path":1800,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1801,"content":1807,"config":1812,"_id":1814,"_type":14,"title":1815,"_source":16,"_file":1816,"_stem":1817,"_extension":19},"/en-us/blog/install-gitlab-one-click-gcp-marketplace",{"title":1802,"description":1803,"ogTitle":1802,"ogDescription":1803,"noIndex":6,"ogImage":1804,"ogUrl":1805,"ogSiteName":720,"ogType":721,"canonicalUrls":1805,"schema":1806},"Install GitLab with a single click from the new GCP Marketplace","GitLab is now available on the new Google Cloud Platform Marketplace, so you can deploy GitLab on Google Kubernetes Engine with a single click!","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680061/Blog/Hero%20Images/gcp-send-gitlab-large.png","https://about.gitlab.com/blog/install-gitlab-one-click-gcp-marketplace","\n                        {\n        
\"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Install GitLab with a single click from the new GCP Marketplace\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"William Chia\"}],\n        \"datePublished\": \"2018-07-18\",\n      }",{"title":1802,"description":1803,"authors":1808,"heroImage":1804,"date":1809,"body":1810,"category":300,"tags":1811},[1691],"2018-07-18","\nToday, Google Cloud announced its [new Google Cloud Platform (GCP) marketplace](https://cloudplatform.googleblog.com/2018/07/introducing-commercial-kubernetes-applications-in-gcp-marketplace.html) with the ability to deploy applications to your Kubernetes clusters on Google Kubernetes Engine (GKE). We’re proud to make GitLab available in the GCP Marketplace from day one. While you can [install GitLab almost anywhere](/install/), the new GCP Marketpklace app installs with just a single click. It's the easiest way to get your own self-managed GitLab instance up and running.\n\n![Deploy GitLab on Google Cloud Platform](https://about.gitlab.com/images/google-cloud-platform/gcp-send-gitlab-medium.png)\n\n### Not looking to manage your own instance?\n\nFolks who don’t want to take on the overhead of administering their own GitLab instance can [sign up for GitLab.com](https://gitlab.com/users/sign_in). GitLab.com is a SaaS offering that runs the same software as GitLab self-managed, managed by GitLab.\n\nRecently, we announced our [migration from Azure to GCP](/blog/moving-to-gcp/). This migration is the first step in our goal of running GitLab.com as a cloud native application on Kubernetes. The migration has involved careful planning along with decomposing GitLab into individual services. The lessons learned through our migration have translated directly into our how we are building the GitLab Helm Chart. 
The work we’ve done to migrate GitLab.com has fueled our ability to offer a solid option for self-managed users to deploy GitLab to Kubernetes.\n\n### Want to deploy your application to Kubernetes?\n\nWith a built-in container registry and [Kubernetes integration](/solutions/kubernetes/), GitLab makes it easier than ever to get started with containers and cloud native development. [Gitlab CI/CD](/topics/ci-cd/) can deploy your application to any Kubernetes cluster.\n\nIf you don’t have a Kubernetes cluster, we’ve got you covered. The easiest way to get set up in using our [GKE Integration](/partners/technology-partners/google-cloud-platform/) and [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/). It takes just a few clicks to set up, then you have a full deployment pipeline. Just commit your code and GitLab does rest.\n\n![GitLab deploys your app to Google Cloud Platform](https://about.gitlab.com/images/google-cloud-platform/gitlab-send-app-medium.png)\n\n#### Join us at Google Next\n\nNext week on July 24-27 we’ll be at [Google Nex](https://cloud.withgoogle.com/next18/sf/)t in San Francisco, where there’s a lot going on. [Follow GitLab on Twitter](https://twitter.com/gitlab) to stay up to date on announcements from the show. If you’re at the show, stop by booth #S1629 and say hi! We’d love to hear how you are using GitLab and show you how our GKE Integration and Marketplace install work.  \n\n#### Summary\n\nYou can use GitLab either as a self-managed app or as a service on GitLab.com. Today, we’ve made it easier than ever to install [GitLab with the GCP Marketplace](https://console.cloud.google.com/marketplace/details/gitlab-public/gitlab?filter=solution-type:k8s). Additionally, we’ll be moving GitLab.com to GCP and soon afterward to GKE. You can look forward to the increased stability and performance that Kubernetes will bring to GitLab.com. 
Regardless of whether you are using self-managed GitLab or GitLab.com, GitLab’s Kubernetes integration and GKE integration make it easy to deploy your app to Kubernetes. Stop by Google Next and follow our Twitter feed to get the latest news on using GitLab together with Google Cloud Platform.\n",[9,1229,1147,859],{"slug":1813,"featured":6,"template":734},"install-gitlab-one-click-gcp-marketplace","content:en-us:blog:install-gitlab-one-click-gcp-marketplace.yml","Install Gitlab One Click Gcp Marketplace","en-us/blog/install-gitlab-one-click-gcp-marketplace.yml","en-us/blog/install-gitlab-one-click-gcp-marketplace",{"_path":1819,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1820,"content":1825,"config":1830,"_id":1832,"_type":14,"title":1833,"_source":16,"_file":1834,"_stem":1835,"_extension":19},"/en-us/blog/integrating-gitlab-com-with-atlassian-jira-cloud",{"title":1821,"description":1822,"ogTitle":1821,"ogDescription":1822,"noIndex":6,"ogImage":1488,"ogUrl":1823,"ogSiteName":720,"ogType":721,"canonicalUrls":1823,"schema":1824},"How to integrate GitLab.com with Jira Cloud","Check out how to use the GitLab App on the Atlassian Marketplace to connect your merge requests, branches, and commits to a Jira issue.","https://about.gitlab.com/blog/integrating-gitlab-com-with-atlassian-jira-cloud","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to integrate GitLab.com with Jira Cloud\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tye Davis\"}],\n        \"datePublished\": \"2021-03-25\",\n      }",{"title":1821,"description":1822,"authors":1826,"heroImage":1488,"date":1827,"body":1828,"category":752,"tags":1829},[1493],"2021-03-25","By moving to the cloud engineering teams can accelerate innovation and scale resources across an organization. 
The ease of access and reduced infrastructure costs that comes with moving to the cloud is a direct result of using a platform that easily integrates your data and keeps it secure yet accessible. Gitlab.com, the cloud (SAAS) platform for GitLab, modernizes data platforms to leverage new applications and advances end-to-end software delivery. GitLab partners with other best-in-class cloud companies so your teams can use tools that best align with your team's DevOps ecosystem. Application development requires speed and iteration, making seamless collaboration a necessity to deliver real business value. GitLab embraces connecting all phases of the software development lifecycle (SDLC) in a DevOps ecosystem that fuels visibility, collaboration, and velocity.\n\n## How to use GitLab with Atlassian's Jira\n\nWe know that many companies have been using Jira for project management, and have existing data and business processes built into their instance. For some of these customers, this means it can be difficult and cost-prohibitive to move off of Jira. We believe that people (and tools) work better when they're all in one place, so to serve these customers, we built a seamless integration between GitLab and Jira. By using the [GitLab for Jira app in the Atlassian Marketplace](https://marketplace.atlassian.com/apps/1221011/gitlab-com-for-jira-cloud), you can integrate GitLab.com and Jira Cloud harmoniously.\n\nHere's a short list of what you can do when integrating GitLab with Jira:\n\n* One GitLab project integrates with all the Jira projects in a single Jira instance.\n* Quickly navigate to Jira issues from GitLab.\n* Detect and link to Jira issues from GitLab commits and merge requests.\n* Log GitLab events in the associated Jira issue.\n* Automatically close (also called \"transition\") Jira issues with GitLab commits and merge requests.\n\n## How to configure the integration\n\nThere are two methods for configuring the integration. 
The [Jira DVCS connector](https://docs.gitlab.com/ee/integration/jira/dvcs/), and the method we describe in this blog post. The DVCS connector updates data only once per hour, while our method syncs data in real time. We recommend using our method for this reason, but if you are not using both of these environments then use the Jira DVCS connector instead.\n\n- First, go to Jira Settings > Apps > Find new apps, then search for GitLab.\n- Next, click GitLab for Jira, then click \"Get it now\". Or, go the [App in the marketplace](https://marketplace.atlassian.com/apps/1221011/gitlab-for-jira), directly.\n\n![Arrow pointing to \"get it now button\" on GitLab on Atlassian Marketplace App](https://about.gitlab.com/images/blogimages/atlassianjira/gitlabonatlassianmarketplace.png){: .shadow.medium.center}\nClick the yellow button to download the app.\n{: .note.text-center}\n\n- Third, after installing, click \"Get started to go to the configurations\" page. This page is always available under Jira Settings > Apps > Manage apps.\n\n![GitLab on Atlassian Marketplace App](https://about.gitlab.com/images/blogimages/atlassianjira/manageappsjira.png){: .shadow.medium.center}\nClick the \"Get started button\".\n{: .note.text-center}\n\n- Fourth, in Namespace, enter the group or personal namespace, and then click \"Link namespace to Jira\". The user that is setting up GitLab for Jira must have Maintainer access to the GitLab namespace. Note: The GitLab user only needs access when adding a new namespace. For syncing with Jira, we do not depend on the user’s token.\n\n![GitLab for Jira Configuration](https://about.gitlab.com/images/blogimages/atlassianjira/gitlabforjiraintegration.png){: .shadow.medium.center}\nAdd a namespace.\n{: .note.text-center}\n\nAfter a namespace is added, all of the future commits, branches, and merge requests within all projects under that namespace will be synced to Jira. 
At the moment, past data cannot be synced.\n\nFor more information, see [the documentation](https://docs.gitlab.com/ee/integration/jira/index.html#usage).\n\n### How to troubleshoot GitLab for Jira\n\nThe GitLab for Jira App uses an iframe to add namespaces on the settings page. Some browsers block cross-site cookies which can lead to a message saying that the user needs to log on to GitLab.com even though the user is already logged in: \"You need to sign in or sign up before continuing.\"\n\nIn this situation, we recommend using [Firefox](https://www.mozilla.org/en-US/firefox/), [Google Chrome](https://www.google.com/chrome/index.html) or enabling cross-site cookies in your browser.\n\n### What are the limitations of GitLab for Jira?\n\nThis integration is currently not supported on GitLab instances under a [relative URL](https://docs.gitlab.com/omnibus/settings/configuration.html#configuring-a-relative-url-for-gitlab) (for example, http://yourcompanyname.com/gitlab).\n\n## How to use GitLab for Jira\n\nAfter the integrating GitLab and Jira, you can:\n\n- Refer to any Jira issue by its ID in GitLab branch names, commit messages, and merge request titles.\n\n- Using commit messages in GitLab, you can move Jira issues along that Jira projects defined transitions.\n\n![GitLab for Jira Setup](https://about.gitlab.com/images/blogimages/atlassianjira/jiraissuescreenshot.png){: .shadow.medium.center}\nIn this image, you can see that this Jira issue has four stages: Backlog, selected for development, in progress, and done.\n{: .note.text-center}\n\n- As referenced in the base GitLab-Jira integration, when you reference an issue in a comment on a merge request and commit, e.g., PROJECT-7, the basic integration adds a comment in Jira issue. Also, by commenting in a Jira transition (putting a # first), this will move a Jira issue to the desired transition. 
Below is an example using the built-in GitLab Web IDE (this can be done in your Web IDE of choice as well).\n\n![View of Jira Transitions](https://about.gitlab.com/images/blogimages/atlassianjira/jiraissuescreenshot2.png){: .shadow.medium.center}\nThere are multiple Jira transition options.\n{: .note.text-center}\n\n- Now, the user can see linked branches, commits, and merge requests in Jira issues (merge requests are called \"pull requests\" in Jira issues).\nJira issue IDs must be formatted in UPPERCASE for the integration to work.\n\n![View branches, commits and merge requests in your jira issue](https://about.gitlab.com/images/blogimages/atlassianjira/jiraissuescreenshot4.png){: .shadow.medium.center}\nView branches, commits, and merge requests in your Jira issue.\n{: .note.text-center}\n\n- Click the links to see your GitLab repository data.\n\n![Deep Dive into your GitLab commits](https://about.gitlab.com/images/blogimages/atlassianjira/jiraissuescreenshot5.png){: .shadow.medium.center}\nHow to take a look at your GitLab commits.\n{: .note.text-center}\n\n![Deep Dive into your GitLab branches](https://about.gitlab.com/images/blogimages/atlassianjira/jiraissuescreenshot6.png){: .shadow.medium.center}\nTake a deep Dive into your GitLab merge requests.\n{: .note.text-center}\n\nFor more information on using Jira Smart Commits to track time against an issue, specify an issue transition, or add a custom comment, see the Atlassian page using [Smart Commits](https://support.atlassian.com/jira-cloud-administration/docs/enable-smart-commits/).\n\n## Watch and learn\n\nMore of a video person? 
For a walkthrough of the integration with GitLab for Jira, watch and learn how to configure GitLab Jira Integration using Marketplace App.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/SwR-g1s1zTo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nGitLab helps teams ship software faster with technology integration options, such as the integration with Jira, that automate tasks, provide visibility into development progress and the greater end-to-end software lifecycle. We recognize that many companies use Jira for Agile project management and our seamless integration brings Jira together with GitLab.\n\nCover image by [Mikołaj Idziak](https://unsplash.com/@mikidz) on [Unsplash](https://unsplash.com/photos/nwjRmbXbLgw).\n{: .note.text-left}\n",[9,563,1062],{"slug":1831,"featured":6,"template":734},"integrating-gitlab-com-with-atlassian-jira-cloud","content:en-us:blog:integrating-gitlab-com-with-atlassian-jira-cloud.yml","Integrating Gitlab Com With Atlassian Jira Cloud","en-us/blog/integrating-gitlab-com-with-atlassian-jira-cloud.yml","en-us/blog/integrating-gitlab-com-with-atlassian-jira-cloud",{"_path":1837,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1838,"content":1843,"config":1850,"_id":1852,"_type":14,"title":1853,"_source":16,"_file":1854,"_stem":1855,"_extension":19},"/en-us/blog/introducing-the-gitlab-kubernetes-agent",{"title":1839,"description":1840,"ogTitle":1839,"ogDescription":1840,"noIndex":6,"ogImage":808,"ogUrl":1841,"ogSiteName":720,"ogType":721,"canonicalUrls":1841,"schema":1842},"Understand the new GitLab Agent for Kubernetes","Just released in 13.4, our brand new Kubernetes Agent provides a secure and K8s–friendly approach to integrating GitLab with your clusters.","https://about.gitlab.com/blog/introducing-the-gitlab-kubernetes-agent","\n                        {\n        \"@context\": 
\"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Understand the new GitLab Agent for Kubernetes\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Viktor Nagy\"}],\n        \"datePublished\": \"2020-09-22\",\n      }",{"title":1839,"description":1840,"authors":1844,"heroImage":808,"date":1846,"body":1847,"category":752,"tags":1848},[1845],"Viktor Nagy","2020-09-22","\n\nWe are happy to share the first iteration of the GitLab Agent for Kubernetes with our users and community. The Agent is the foundation for the next generation of the integration between GitLab and Kubernetes. \n\n## A bit of history of the GitLab Kubernetes Integrations\n\nGitLab's current Kubernetes integrations were introduced more than three years ago. Their primary goal was to allow a simple setup of clusters and provide a smooth deployment experience to our users. These integrations served us well in the past years but at the same time its weaknesses were limiting for some important and crucial use cases. The biggest weaknesses we see with the current integration are:\n\n- the requirement to open up the cluster to the internet, especially to GitLab\n- the need for cluster admin rights to get the benefit of GitLab Managed Clusters\n- exclusive support for push-based deployments that might not suit some highly regulated industries\n\nA few months ago, the Configure Team at GitLab started going in a new direction to come up with an integration that could address these weaknesses and provide a cloud native tie-in between GitLab and Kubernetes. 
This new direction is built on the GitLab Agent for Kubernetes, which we released in [GitLab 13.4](/releases/2020/09/22/gitlab-13-4-released/).\n\n## Design goals\n\nWhen we sat down to solve for the above weaknesses, we came up with a few principles that we are seeking to follow.\n\nWe want to be good cloud native citizens, and work together with the community, instead of reinventing the wheel.\n\nWe primarily want to serve expert Kubernetes platform engineers. While the current GitLab Managed Clusters and cluster creation from within GitLab might serve many use cases, it's primarily aimed at simple cluster setup and is not flexible enough to be the basis for production clusters. We want to change this approach, and are focusing on the needs of expert Kubernetes engineers first. We think that coming up with sane defaults will provide the necessary simplicity for new Kubernetes users as well.\n\nWe want to offer a secure solution that allows cluster operators to restrict GitLab's rights in the cluster and does not require opening up the cluster to the Internet.\n\n## The Agent\n\nFollowing the above goals, we've started to develop the GitLab Agent for Kubernetes. The Agent provides a permanent communication channel between GitLab and the cluster. To follow industry best practices for [GitOps](/topics/gitops/) it is configured by code, instead of a UI.\n\nThe current version of the Agent allows for pull-based deployments. Its deployment machinery is built on the [`gitops-engine`](https://github.com/argoproj/gitops-engine), a project initiated by ArgoCD and Flux where GitLab engineers are actively contributing as well.\n\n### Setting up the GitLab Agent\n\nThe Agent needs to be set up first. 
This requires a few actions from the user:\n\n- create an Agent token for authentication with GitLab, and store it in your cluster as a secret\n- commit the necessary Agent configurations in one of your repositories\n- install the Agent to your cluster\n\n### Deployments with an Agent\n\nAs mentioned above, the Agent needs a configuration directory inside one of your repositories. This configuration describes the projects that the Agent syncs into your clusters. We call the synced projects the __manifest project__. The manifest project should contain Kubernetes manifest files. The __manifest project__ project might be either inside or separated from your application code.\n\nWe've set up a simple example that shows a __manifest project__ and an __application project__. In this example [GitLab CI/CD](/topics/ci-cd/) in the __application project__ is used to create a container image and update the __manifest project__. Then the Agent picks up the changes from the __manifest project__, and deploys the Kubernetes manifests stored there.\n\n### Limitations\n\nAs this is the initial release of the Agent, it has many known limitations. We don't support all the amazing features the previous GitLab Kubernetes integration does such as [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/), deploy boards, GitLab Managed Apps, etc. To start in GitLab 13.4 we limited our focus to supporting pull-based deployment for Helm-based GitLab installations. \n\nFollowing the current release, we will be focusing on:\n\n- [shipping the GitLab Agent for Kubernetes as part of the Official Linux Package](https://gitlab.com/groups/gitlab-org/-/epics/3834)\n- [supporting the deployment of private repositories](https://gitlab.com/gitlab-org/gitlab/-/issues/220912)\n\n## Further plans for GitLab Kubernetes Integrations\n\nThe Agent opens up many new opportunities for GitLab's Kubernetes integrations. 
Having an active component allows us to provide all the GitLab functionalities in locked down clusters as well. We're currently looking into the following areas to support with the agent:\n\n- integrate cluster-side dynamic container scanning with GitLab\n- use GitLab as an authentication and authorization provider for Kubernetes clusters\n- offer linters and checks for Kubernetes best practices on deployed resources\n- proxy cluster services easily through GitLab\n\nYou can see all our plans in the [Agent epic](https://gitlab.com/groups/gitlab-org/-/epics/3329) where we invite you to give us feedback and about this direction. \n\nYou can view a demo of how to install and use the GitLab Agent below:\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://player.vimeo.com/video/505413162\" width=\"640\" height=\"480\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\u003C/figure>\n",[859,1849,9,1429],"releases",{"slug":1851,"featured":6,"template":734},"introducing-the-gitlab-kubernetes-agent","content:en-us:blog:introducing-the-gitlab-kubernetes-agent.yml","Introducing The Gitlab Kubernetes Agent","en-us/blog/introducing-the-gitlab-kubernetes-agent.yml","en-us/blog/introducing-the-gitlab-kubernetes-agent",{"_path":1857,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1858,"content":1864,"config":1870,"_id":1872,"_type":14,"title":1873,"_source":16,"_file":1874,"_stem":1875,"_extension":19},"/en-us/blog/introducing-workspaces-beta",{"title":1859,"description":1860,"ogTitle":1859,"ogDescription":1860,"noIndex":6,"ogImage":1861,"ogUrl":1862,"ogSiteName":720,"ogType":721,"canonicalUrls":1862,"schema":1863},"A first look at workspaces: On-demand, cloud-based development environments","Remote development workspaces are now available in Beta for GitLab Premium and Ultimate 
users.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682731/Blog/Hero%20Images/code-editor-workspace.jpg","https://about.gitlab.com/blog/introducing-workspaces-beta","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A first look at workspaces: On-demand, cloud-based development environments\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Eric Schurter\"}],\n        \"datePublished\": \"2023-05-23\",\n      }",{"title":1859,"description":1860,"authors":1865,"heroImage":1861,"date":1867,"body":1868,"category":1250,"tags":1869},[1866],"Eric Schurter","2023-05-23","\n\nCloud-based development tools are quickly gaining popularity for their ability to provide a consistent, secure developer experience and streamline developer onboarding, which can reduce the time it takes for a developer to contribute to a codebase from days to hours, or even minutes. To support this shift in developer workflows, GitLab has introduced a significant upgrade in [remote development](/direction/create/ide/remote_development/) in GitLab 16.0. Secure, on-demand, cloud-based development workspaces are now available in Beta for GitLab Premium and Ultimate users on GitLab.com and on self-managed instances.\n\nThe December 2022 [release of the Web IDE Beta](/blog/get-ready-for-new-gitlab-web-ide/) delivered a familiar, feature-rich editing experience in your browser and the ability to connect to a remote server and interact with a cloud-based runtime environment. Workspaces take experience to the next level by bringing the configuration, orchestration, and management of your remote development environments into GitLab for the first time.\n\n### What is a workspace? \nA workspace is your personal, ephemeral development environment in the cloud, created using centrally managed and curated dependencies defined in code. 
Developing with a workspace enables you to spend less time configuring your local development environment and more time focusing on writing code. Instead of managing package updates and troubleshooting version conflicts, consistent and reproducible cloud-based environments are available on demand. \n\nEach workspace is a unique instance of your environment so you can switch between tasks seamlessly and have confidence that your environment will remain stable between sessions. Whether used as a tool to accelerate developer onboarding, provide stable environments for education purposes, or improve security by limiting the need to clone code locally, workspaces will change how you develop software on GitLab.\n\n### How do you create a workspace? \nTo create a workspace in GitLab, you’ll need: \n\n- **A cloud platform or self-hosted Kubernetes cluster:** This release is focused on delivering a “bring your own infrastructure” solution. We know that many of you want complete control over your infrastructure and code, so we have prioritized hosting workspaces on your own infrastructure or in the cloud platform of your choice. \n\n- **An agent:** Everything starts with the GitLab Agent for Kubernetes. Once you have the agent running in a Kubernetes cluster, [configuring remote development](https://docs.gitlab.com/ee/user/workspace/#prerequisites) is a matter of installing a couple of dependencies and adding a few lines of code to the agent configuration. \n\n- **A devfile:** After you have an agent configured, you need to define your environment in a `.devfile.yaml` file, stored at the root of a project. In this file, you can specify container images, map ports, define volume mounts, [and more](https://docs.gitlab.com/ee/user/workspace/#relevant-schema-properties). \n\n- **An editor:** In the first iteration, we are supporting the Web IDE and injecting it into the workspace. 
In future iterations, we will add support for other editors like [Jupyter Notebook](https://gitlab.com/gitlab-org/gitlab/-/issues/408381).\n\n- Optionally, you can [override the default timeout for your workspace](https://docs.gitlab.com/ee/user/workspace/index.html#create-a-workspace) to make sure you’re using cloud resources efficiently. Since workspaces are meant to be ephemeral, the default lifespan is 24 hours, but it can be set as high as a week. \n\nAfter you’ve created your workspace, you can launch the Web IDE with a single click and get right to work. \n\nWant to see it in action? This short video walks you through the configuration of an agent and the creation of a workspace: \n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/lDVaOtO_JVM\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n### What comes next?\nWe’re excited to start getting your feedback so we’re introducing this as a Beta for public projects. Your credentials aren't currently being injected into the workspace during its creation, which means you can’t automatically clone private repositories. You can, however, create a workspace and authenticate yourself manually after it’s running. We’ll be working on [injecting credentials](https://gitlab.com/groups/gitlab-org/-/epics/10480) as well as addressing some other points of friction to make this even easier for developers to adopt. 
We’ll also be working on: \n\n- [Connecting to a workspace via SSH from your desktop IDE](https://gitlab.com/groups/gitlab-org/-/epics/10478)\n- [Support for alternative editors](https://gitlab.com/groups/gitlab-org/-/epics/10635) like Jupyter Notebook or vim\n- [Configure instance or group-level usage limits to manage cloud resources](https://gitlab.com/groups/gitlab-org/-/epics/10571)\n- [Support for architectures other than amd64](https://gitlab.com/groups/gitlab-org/-/epics/10594)\n\nAs you can see, we have an ambitious roadmap ahead of us, and we want to hear from you. Please let us know how you are using workspaces, what features are most important to you, and share any issues you run into along the way in the [public feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/410031). We can’t wait to see how you integrate workspaces into your DevSecOps workflow to improve the developer experience!\n\n**Disclaimer**: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. 
The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab.\n\nCover image by [AltumCode](https://unsplash.com/@altumcode) on [Unsplash](https://unsplash.com/photos/dC6Pb2JdAqs)\n{: .note}\n\n",[9,796,1250],{"slug":1871,"featured":6,"template":734},"introducing-workspaces-beta","content:en-us:blog:introducing-workspaces-beta.yml","Introducing Workspaces Beta","en-us/blog/introducing-workspaces-beta.yml","en-us/blog/introducing-workspaces-beta",{"_path":1877,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1878,"content":1884,"config":1892,"_id":1894,"_type":14,"title":1895,"_source":16,"_file":1896,"_stem":1897,"_extension":19},"/en-us/blog/introduction-of-oidc-modules-for-integration-between-google-cloud-and-gitlab-ci",{"title":1879,"description":1880,"ogTitle":1879,"ogDescription":1880,"noIndex":6,"ogImage":1881,"ogUrl":1882,"ogSiteName":720,"ogType":721,"canonicalUrls":1882,"schema":1883},"OIDC simplifies GitLab CI/CD authentication with Google Cloud","OpenID Connect can sometimes be complex, but it's the safer and recommended way to authenticate your GitLab pipeline with Google Cloud. 
This tutorial shows you how.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669863/Blog/Hero%20Images/security-pipelines.jpg","https://about.gitlab.com/blog/introduction-of-oidc-modules-for-integration-between-google-cloud-and-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How OIDC can simplify authentication of GitLab CI/CD pipelines with Google Cloud\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Hiroki Suezawa\"},{\"@type\":\"Person\",\"name\":\"Dhruv Jain\"}],\n        \"datePublished\": \"2023-06-28\",\n      }",{"title":1885,"description":1880,"authors":1886,"heroImage":1881,"date":1889,"body":1890,"category":979,"tags":1891},"How OIDC can simplify authentication of GitLab CI/CD pipelines with Google Cloud",[1887,1888],"Hiroki Suezawa","Dhruv Jain","2023-06-28","In recent years, the [integration of cloud services and GitLab through\nGitOps](https://about.gitlab.com/blog/the-ultimate-guide-to-gitops-with-gitlab/)\nhas become very common. Applications are now continuously tested and\ndeployed through [continuous integration and delivery\n(CI/CD)](https://about.gitlab.com/topics/ci-cd/); cloud environments are\nmanaged in code through Infrastructure as Code (IaC) using tools like\nTerraform; and GitLab CI is used as a core tool to perform these GitOps\nprocesses.\n\n\nAt the same time, [software supply chain\nattacks](https://about.gitlab.com/blog/the-ultimate-guide-to-software-supply-chain-security/)\nhave increased. 
To reduce the risk of an attack, the use of OpenID Connect\n([OIDC](https://docs.gitlab.com/ee/integration/openid_connect_provider.html#introduction-to-openid-connect))\nauth is recommended, and GitLab 15.7 introduced [ID\ntokens](https://docs.gitlab.com/ee/ci/secrets/id_token_authentication.html),\na mechanism for secure OIDC integration.\n\n\nHowever, OIDC integration can be complex for beginners and can be difficult\nto configure properly. Therefore, GitLab's Infrastructure Security Team has\ncreated a Terraform module for configuring Google Cloud and a CI template\nfor GitLab CI so GitLab CI and Google Cloud can be securely integrated.\n\n\nThis tutorial explains how to use [these OIDC\nmodules](https://gitlab.com/gitlab-com/gl-security/security-operations/infrastructure-security-public/oidc-modules).\n\n\n## Why OIDC?\n\nThe integration between Google Cloud and GitLab CI has often been done by\nadding a static key of the service account in Google Cloud to the\nenvironment variables of CI. 
However, this method has the following\nproblems:\n\n\n- The risk of compromise is high because the same key can be used to\nmanipulate the cloud environment over time.\n\n- Because static keys are portable, there is no link between the key and the\nenvironment in which it is used, making it difficult to identify where the\nkey is being used.\n\n\nOIDC authentication can solve the above problems by providing the following\nbenefits:\n\n- No need to issue static keys, eliminating the need for long-term key\nmanagement.\n  - It also eliminates the compliance need of rotating the secrets every few months.\n- Low risk of leakage due to temporary tokens issued.\n\n- Because the CI used is tied to the Google Cloud environment, it is\npossible to properly manage where the service account is used.\n\nIn addition, other settings such as CI and CD isolation can be configured\nusing [the claims provided by GitLab\nCI](https://docs.gitlab.com/ee/ci/secrets/id_token_authentication.html).\n\n\n## OIDC authentication with Google Cloud\n\nThe OIDC integration between Google Cloud and GitLab CI works as follows:\n\n\n- Preparation (areas to configure in Terraform in OIDC models)\n  1. Create a service account in Google Cloud for CI integration and set up the appropriate roles.\n  1. Create a Google Cloud Workload Identity pool and provider, and configure integration with GitLab CI.\n  1. 
Assign the Workload Identity User role to the service account.\n\n\n\n\n![Simplified\ndiagram](https://about.gitlab.com/images/blogimages/2023-06-30-introduction-of-oidc-modules-for-integration-between-google-cloud-and-gitlab-ci/oidc-auth-diagram.png){:\n.shadow}\n\n\nGitLab CI in action (simplified by the GitLab CI template in OIDC modules)\n\n{: .note .text-center}\n\n\nGoogle Cloud authenticates using an ID token issued on GitLab CI, so there\nis no need to issue a Google Cloud service account key.\n\n\n## How to use a Terraform module\n\nThe process of configuring a Terraform module to establish a connection\nbetween Google Cloud and GitLab using OIDC is fairly simple. This module\ntakes care of the following steps:\n\n1. Create the Google Cloud Workload Identity Pool.\n\n1. Create a Workload Identity Provider.\n\n1. Grant permissions for service account impersonation.\n\n\nNote: Your account must have at least the Workload Identity Pool Admin\npermission on the Google Cloud project.\n\n\n```terraform\n\n# terraform\n\nmodule \"gl_oidc\" {\n source = \"gitlab.com/gitlab-com/gcp-oidc/google\"\n version = \"3.0.0\"\n google_project_id = GOOGLE_PROJECT_ID\n gitlab_project_id = GITLAB_PROJECT_ID\n oidc_service_account = {\n   \"sa\" = {\n     sa_email  = \"SERVICE_ACCOUNT_EMAIL\"\n     attribute = \"attribute.project_id/GITLAB_PROJECT_ID\"\n   }\n }\n}\n\n```\n\n\nThe above sample module can be used to configure OIDC. 
There are some\nadditional parameters that can be used to configure this module further (a\ndetailed list and description of those parameters can be found\n[here](https://gitlab.com/gitlab-com/gl-security/security-operations/infrastructure-security-public/oidc-modules/-/tree/main/#configure-gitlab-for-oidc-integration-using-terraform-module)).\n \nBy default, all branches of the project are authenticated to Google Cloud,\nbut you can specify more granular conditions, such as the branch name of the\ncommit that triggered the CI, or authenticating only with a specific tag.\n\n\nFurther settings can be made by changing the following attribute settings in\naccordance with the ID token claim:\n\n\n```\n  oidc_service_account = {\n    \"sa\" = {\n      sa_email  = \"SERVICE_ACCOUNT_EMAIL\"\n      attribute = \"attribute.project_id/GITLAB_PROJECT_ID\"\n    }\n```\n\n\nCode files for this module are available\n[here](https://gitlab.com/gitlab-com/gl-security/security-operations/infrastructure-security-public/oidc-modules/-/tree/main/terraform-modules/gcp-oidc).\n\n\n## How to use the CI template\n\n[The CI\ntemplate](https://gitlab.com/gitlab-com/gl-security/security-operations/infrastructure-security-public/oidc-modules/-/blob/main/templates/gcp_auth.yaml)\nmakes GitLab CI very easy for Google Cloud OIDC authentication. This CI\ntemplate supports [Application Default\nCredentials](https://cloud.google.com/docs/authentication/application-default-credentials)\nand can be used from IaC such as Terraform, CLI such as gcloud, and SDKs in\nPython and Go.\n\n\nFor example, if you want to use the CI template for Terraform, you can\nwrite:\n\n\n```\n\n# You should upgrade to the latest version. 
You can find the latest version\nat\nhttps://gitlab.com/gitlab-com/gl-security/security-operations/infrastructure-security-public/oidc-modules/-/releases\n\ninclude:\n  - remote: 'https://gitlab.com/gitlab-com/gl-security/security-operations/infrastructure-security-public/oidc-modules/-/raw/3.0.0/templates/gcp_auth.yaml'\n\nterraform:\n  image:\n    name: hashicorp/terraform:1.5.3\n    entrypoint:\n      - /usr/bin/env\n      - \"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"\n  extends: .google-oidc:auth\n  variables:\n    WI_POOL_PROVIDER: //iam.googleapis.com/projects/GOOGLE_PROJECT_ID/locations/global/workloadIdentityPools/WORKLOAD_IDENTITY_POOL/providers/WORKLOAD_IDENTITY_POOL_PROVIDER\n    SERVICE_ACCOUNT: SERVICE_ACCOUNT_EMAIL\n  script:\n    - terraform init\n    - terraform plan\n```\n\n\n### Required variables\n\n- WI_POOL_PROVIDER(under .google-oidc:) - Full canonical resource name of\nthe workload identity pool provider. This value must be written under\n.google-oidc: like this.\n\n- SERVICE_ACCOUNT - Service Account email address\n\n\nA detailed list and description of those parameters can be found\n[here](https://gitlab.com/gitlab-com/gl-security/security-operations/infrastructure-security-public/oidc-modules/-/blob/main/README.md#using-oidc-in-pipelines).\n\n\nAs a note, you cannot use `before_script` in the job that uses this template\nbecause the way GitLab CI works will result in OIDC code being overwritten.\nCI template uses `before_script` to perform the initial configuration of\nOIDC.\n\n\nCode samples for this module are available\n[here](https://gitlab.com/gitlab-com/gl-security/security-operations/infrastructure-security-public/oidc-modules/-/tree/main/samples/ci/gcp).\n\n\n## Next steps\n\nThis article has introduced OIDC modules for OIDC integration and secure\nauthentication between Google Cloud and GitLab CI. In short, we are doing\nthe following steps:\n\n\n1. Setting up a service account\n\n1. 
Granting permissions to the service account\n\n1. Running the Terraform module\n\n1. Setting up CI pipeline\n\n\nYou can find the relevant sample for the above steps\n[here](https://gitlab.com/gitlab-com/gl-security/security-operations/infrastructure-security-public/oidc-modules/-/tree/main/samples).\n\n\nAlso, GitLab is currently developing a [CI Catalog and CI\nComponents](https://about.gitlab.com/blog/use-inputs-in-includable-files/).\nWe plan to support them.\n\n\nThe GitLab Infrastructure Security Team will continue to improve the modules\nas we receive feedback, and we hope to consider and release components that\nmaintain a high level of security and usability for both internal and\nexternal use. \n\n\n## Read more\n\n- [Configure OIDC with GCP Workload Identity\nFederation](https://docs.gitlab.com/ee/ci/cloud_services/google_cloud/)\n\n- [Workload Identity Federation on Google\nCloud](https://cloud.google.com/iam/docs/workload-identity-federation)\n\n- [Terraform for\ngoogle_iam_workload_identity_pool_provider](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/iam_workload_identity_pool_provider)\n\n- [OIDC Authentication using ID\ntokens](https://docs.gitlab.com/ee/ci/secrets/id_token_authentication.html)\n",[1146,109,979,9],{"slug":1893,"featured":6,"template":734},"introduction-of-oidc-modules-for-integration-between-google-cloud-and-gitlab-ci","content:en-us:blog:introduction-of-oidc-modules-for-integration-between-google-cloud-and-gitlab-ci.yml","Introduction Of Oidc Modules For Integration Between Google Cloud And Gitlab 
Ci","en-us/blog/introduction-of-oidc-modules-for-integration-between-google-cloud-and-gitlab-ci.yml","en-us/blog/introduction-of-oidc-modules-for-integration-between-google-cloud-and-gitlab-ci",{"_path":1899,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1900,"content":1906,"config":1911,"_id":1913,"_type":14,"title":1914,"_source":16,"_file":1915,"_stem":1916,"_extension":19},"/en-us/blog/kubernetes-101",{"title":1901,"description":1902,"ogTitle":1901,"ogDescription":1902,"noIndex":6,"ogImage":1903,"ogUrl":1904,"ogSiteName":720,"ogType":721,"canonicalUrls":1904,"schema":1905},"Getting Started with Kubernetes","Pods, nodes, clusters – oh my! Get the lowdown on Kubernetes from Brendan O'Leary's talk at Contribute 2019.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678474/Blog/Hero%20Images/clouds_kubernetes101.jpg","https://about.gitlab.com/blog/kubernetes-101","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Getting Started with Kubernetes\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2019-10-24\",\n      }",{"title":1901,"description":1902,"authors":1907,"heroImage":1903,"date":1908,"body":1909,"category":729,"tags":1910},[855],"2019-10-24","\nKube-uh-not-a-clue?\n\nIt's the most common response to anyone who hears the term “Kubernetes” for the first time. 
If Kubernetes is, quite literally, Greek to you, then this blog post and [the corresponding video](https://www.youtube.com/watch?v=rq4GZ_GybN8) are two good places to start.\n\nWhile at [Contribute 2019](/blog/how-we-scaled-our-summits/), senior solutions manager [Brendan O’Leary](/company/team/#brendan) gave a presentation explaining the nuts and bolts of Kubernetes and how we use this open source tool at GitLab.\n\n## What is Kubernetes?\n\n“[Kubernetes](https://kubernetes.io/) is an open source system for automating deployment, scaling, and management of containerized applications,” according to the [Cloud Native Computing Foundation (CNCF)](https://www.cncf.io/).\n\n>“You'll hear [Kubernetes] called a lot of different things. You'll hear people say, ‘Well, it's a container scheduler.’ You'll hear people say it's a desired state manager. You'll hear people say it's an orchestrator,” says Brendan. “All these things just basically boil down to it's _a system that keeps the system how we want it to be_, which sounds kind of crazy. But when we're a software company for software companies, you can relate a little bit.”\n\nSo a system that keeps the system how we want it to be, what does that mean exactly?\n\nTo understand what Kubernetes is and what it does, it’s best to dig a bit deeper into the origin story of this technology.\n\nThe journey to Kubernetes started at Google, when infrastructure developers were searching for a way to deploy new applications on hundreds of thousands of globally distributed servers. The result was Borg, a private tool developed by Google engineers for this purpose. 
The engineers iterated on Borg to launch Project Seven – an open source project that wasn’t entirely Borg, but took elements of Borg to produce version 1.0 of Kubernetes.\n\nKubernetes, which translates from Greek to “pilot,\" “helmsman,” or \"governor,\" is managed by CNCF, a foundation created by Google and Linux to house Kubernetes and other open source computing projects.\n\n## The benefits of Kubernetes\n\nKubernetes is highly portable across multiple cloud platforms and simplifies container management across however many of them are in use. Kubernetes make it easy to achieve greater scalability, flexibility, and productivity.\n\nAnother big benefit of Kubernetes is of course the fact that it’s open source – it’s continuously improved and updated so that there are minimal workflow interruptions.\n\n## What features does Kubernetes offer?\n\nKubernetes is one of the fastest growing open-source software projects around today. Here are some of the reasons why:\n\n* Deployments can be sent to one cloud or multiple cloud services without losing any application functionality or performance.\n\n* Kubernetes automation capabilities handle scheduling and deploying containers regardless of where it comes from (on-premise, cloud, or other). The automation also auto-scales up and down to increase efficiency and reduce waste and it creates new containers if dealing with a heavy workload.\n\n* Kubernetes allows for rolling back an application change if something goes wrong.\n\n* The open-source nature of Kubernetes lets users take advantage of a vast ecosystem of open-source tools.\n\n* The software is never outdated due to previously launched versions – it is always updating.\n\n## The role of containers\n\nContainers are a lightweight technology that lets you securely run an application and its dependencies without impacting other containers or your operating system (OS). 
This makes containers more nimble and scalable than using other tools for application management, like virtual machines (VMs) or bare metal. Like VMs, containers can repeat the application as it’s in development, but unlike VMs, the container does not duplicate the OS each time and instead shares the infrastructure, container technology (e.g., Docker), and OS with the host computer. Containers are lightweight and easier to run on the cloud because the OS is not duplicated along with the application, but container technology can be challenging to manage without a tool.\n\n“As you get more and more containers... it has a huge advantage technically, but it really creates a mess as to how are we managing all these containers,” says Brendan. “And there’s another problem – bare metal, virtual machines, containers, these all assume to some extent that you know what's going on with the computer that's running them.”\n\n![Evolution of containers](https://about.gitlab.com/images/blogimages/evolution_of_containers.png){: .shadow.medium.center}\nContainers make application deployment simpler, but containers are hard to orchestrate without a tool like Kubernetes.\n{: .note.text-center}\n\nBut orchestrating the various application deployments in containers is a level of abstraction that is difficult for the human mind to grapple with and is challenging to manage manually, which is where Kubernetes comes in.\n\n## Kubernetes as scheduler\n\nKubernetes is an open source container orchestrator that automates container management from deployment to scaling and operating.\n\nThere are a few key advantages to using Kubernetes, namely that the technology takes an extremely abstract method of application management – containers – and schedules the deployments to occur automatically.\n\nBrendan mentions other advantages to using Kubernetes, including that is can run routine health checks and is a very self-healing technology. 
A second key advantage to using Kubernetes for [DevOps](/topics/devops/) is that it is a declarative technology at its core. By using the [desired state manager](https://medium.com/@yannalbou/kubernetes-desired-state-4c5c4e873743), you can describe how you want your application to run and Kubernetes makes it happen.\n\n## Core Kubernetes concepts and definitions\n\n*   **Pod**: An abstraction that represents a group of one or more application containers. “The pod is just a unit that says these are the containers that represent the front end website, or these are the containers that represent the payment system,” explains Brendan.\n*   **Node**: A worker machine in Kubernetes that may be a VM or a physical machine (e.g., a computer), depending upon the cluster. The node often includes Docker, the pods (“group of containers”), and the VM or computer that includes the OS.\n*   **Cluster**: The highest level of abstraction in Kubernetes, it contains all the nodes, pods, and a **master** – which maintains the desired state of your application by orchestrating the nodes.\n*   **Service**: Defines a logical set of pods (e.g., “payment system”) and sets a policy about who can access them. “Pods come and go, but a service is forever,” or so the saying goes, Brendan says. “A pod is going to get scheduled into a node. But if that node went away, the fact that this pod is a member of this service means I've got to go find somewhere else to make a new pod that has this container running in it.” A service allows Kubernetes to route traffic to your application regardless of where the pod is running.\n\nThere are plenty of other buzzwords and phrases that are associated with Kubernetes, and Brendan dives into some of them in his presentation (captured in the video below). More concepts are explained on the [Kubernetes website at CNCF](https://kubernetes.io/docs/concepts/).\n\n## GitLab and Kubernetes\n\nThere are three key touchpoints between GitLab and Kubernetes:\n\n1. 
**GitLab is an application, so it can be run on Kubernetes.**\nIf a GitLab customer is already using a cloud native environment (i.e., containers and Kubernetes), then GitLab the application can be installed in that cloud native environment. We have already set-up [Helm Charts](https://docs.gitlab.com/charts/), which describe [how to install GitLab in a cloud native environment](https://docs.gitlab.com/charts/#installing-gitlab-using-the-helm-chart).\n2. **Customers that build their applications in GitLab using CI/CD can deploy to Kubernetes.**\nThe Configure team at GitLab works on the integration between GitLab and Kubernetes so developers can deploy their applications automatically to a Kubernetes cluster. [The GitLab and Kubernetes integration](https://docs.gitlab.com/ee/user/project/clusters/) allows customers to create and dismantle Kubernetes clusters, use review apps, run pipelines, deploy apps, view pod logs, detect and monitor Kubernetes, and much more. The Ops product teams at GitLab are always working to enhance the integration between Kubernetes and GitLab to make Auto DevOps faster and more efficient.\n3. **Moving our production system for GitLab.com to a Kubernetes cluster.**\nWe recently moved our giant GitLab.com application from Microsoft Azure [to a Google Cloud Platform](/blog/moving-to-gcp/). A key reason we changed platforms is that we wanted to move our GitLab.com project to a Kubernetes cluster. This project is ongoing but we are making major strides toward continuous deployment using Kubernetes.\n\n## Is Kubernetes easy to use?\n\nEveryone’s favorite answer: it depends. As with any software, there’s a learning curve to using Kubernetes (like having a basic understanding of how containers work). And it also may not be the right software fit for your needs. 
But if it is, adopting it doesn’t have to be complicated.\n\nKubernetes gives users the basic building blocks for creating developer projects while still allowing user flexibility where it’s needed. It can get a little more labor-intensive if users choose to build their own Kubernetes clusters rather than letting the service do it for them. But most companies don’t choose this route.\n\n## But wait, there’s more\n\nIf you have more questions, like why the heck Kubernetes is abbreviated to K8s, or are searching for more resources, you’re in luck. Brendan dives into more detail about some of the etymology, key concepts, vocabulary and even pop culture that shapes Kubernetes in his presentation. Watch the video below to learn more about how Kubernetes has been the impetus behind a major shift toward cloud native in the DevOps industry, and why we’re on the front lines of that change here at GitLab.\n\n### Watch\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/rq4GZ_GybN8\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n### Supplemental reading\n\n[Kubernetes and the open source community](/blog/kubernetes-chat-with-joe-beda/): A conversation between GitLab CEO [Sid Sijbrandij](/company/team/#sytses) and the co-creator of Kubernetes, Joe Beda.\n\n[Kubernetes and the future of cloud native](/blog/kubernetes-chat-with-kelsey-hightower/): Sid chats with Kelsey Hightower, Google staff developer advocate about cloud native.\n\n[Kubernetes, containers, cloud native – the basics](/blog/containers-kubernetes-basics/): Get a quick overview of the key Kubernetes concepts.\n\n[Kubernetes + GitLab](/solutions/kubernetes/): Explore how GitLab and Kubernetes interact at various touchpoints.\n\n[Cover Photo](https://unsplash.com/photos/9BJRGlqoIUk) by [Pero Kalimero](https://unsplash.com/@pericakalimerica?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on 
[Unsplash](https://unsplash.com/search/photos/cloud?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[9,859],{"slug":1912,"featured":6,"template":734},"kubernetes-101","content:en-us:blog:kubernetes-101.yml","Kubernetes 101","en-us/blog/kubernetes-101.yml","en-us/blog/kubernetes-101",{"_path":1918,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1919,"content":1925,"config":1930,"_id":1932,"_type":14,"title":1933,"_source":16,"_file":1934,"_stem":1935,"_extension":19},"/en-us/blog/kubernetes-and-multicloud",{"title":1920,"description":1921,"ogTitle":1920,"ogDescription":1921,"noIndex":6,"ogImage":1922,"ogUrl":1923,"ogSiteName":720,"ogType":721,"canonicalUrls":1923,"schema":1924},"How Kubernetes merges with multicloud & how to manage it","Google Cloud's Ian Chakeres and Tim Hockin discuss how Kubernetes reduces cloud noise and makes multicloud possible.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681075/Blog/Hero%20Images/kubernetes-multicloud-blog.jpg","https://about.gitlab.com/blog/kubernetes-and-multicloud","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How Kubernetes merges with multicloud & how to manage it\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2020-02-05\",\n      }",{"title":1920,"description":1921,"authors":1926,"heroImage":1922,"date":1927,"body":1928,"category":815,"tags":1929},[772],"2020-02-05","\n\nIn November 2019, we had the opportunity to co-host [MulticloudCon](https://multicloudcon.io/), a zero-day event with our partners at [Upbound](https://upbound.io/). 
The event featured experts in cloud, Kubernetes, database resources, CI/CD, security, and more to learn how multicloud is evolving and empowering developers and operations experts across the industry.\n\nIn this presentation from MulticloudCon, Google Cloud's [Ian Chakeres](http://www.ianchak.com/) and [Tim Hockin](https://twitter.com/thockin) cover the challenges of using multiple clouds, and how Kubernetes cuts through the cloud noise to provide consistency in workflows. Gartner predicts that by 2021, [over 75% of midsize and large organizations will have adopted a multicloud or hybrid IT strategy.](https://www.gartner.com/en/documents/3895580/predicts-2019-increasing-reliance-on-cloud-computing-tra)\n\nAs organizations continue to amp up their [multicloud](/topics/multicloud/) initiatives, they’ll need ways to manage the complexities and differences between multiple cloud environments. Kubernetes is perfectly built for this task because it creates the right abstractions so teams can utilize multiple clouds on a consistent platform.\n\n\n## Discussion highlights\n\n### The challenges of multiple clouds:\n\n> \"The hard thing about multiple clouds is the noise. There's so much that is different across clouds. To learn them to the _depth_ that you need to be able to develop and debug real applications on these clouds is really, really difficult. Networking capabilities across clouds, across environments, are incredibly different and varied. Storage, auto-scaling, life cycle management, all of these things that have a real, material impact on the way you develop your applications. It can be total chaos for your staff.\" – Tim Hockin, Software Engineer, Kubernetes, Anthos, and GKE\n\n\n### Why Kubernetes is built for multicloud:\n\n> \"Kubernetes is this platform that is [at a] high enough level that it hides most of those variances that we see across all the different clouds. 
But it's also [at a] low enough level that you can do anything that you need to, for your business and your developers. Kubernetes provides these abstractions that insulate your teams from some of the mess below, hiding that infrastructure complexity that's associated with multiple clouds.\" – Ian Chakeres, Engineering Manager, Anthos and GKE\n\n### How open source continues to improve Kubernetes and multicloud:\n\n> \"Not only can you build the platform for your teams, but there is this entire ecosystem of people who are out there, in Kubernetes, building things that can help you run your business. I went to look at the [CNCF](https://www.cncf.io/) page recently, just to look at all the different projects, and even just the graduated project list now fills your entire screen. There's this entire ecosystem that builds the infrastructure and the applications... they can fill in the gaps if there are any things that your business is running into. So Kubernetes is giving you this leverage as being a platform that actually spans all of those other clouds.\" – Ian Chakeres\n\n## Kubernetes and multicloud\n\nNetworking across environments, clouds, and clusters remains challenging. Organizations don’t want to train DevOps teams on multiple clouds, and even if they did, training teams on the intricacies and fine details for _every single cloud provider_ would be an exercise in futility. Tailoring deployments for each cloud is inefficient and time-consuming. Kubernetes provides the consistency teams need to work with multiple clouds by creating abstractions that bring all deployments into one environment. Even though there are many exciting things happening in open source around Kubernetes and multicloud, not every abstraction is leak-proof.\n\nIn a perfect multicloud, multi-cluster hybrid world, teams are working with multiple providers in a seamless environment that hides the underlying infrastructure. 
It’s still a little too early for multicloud and hybrid Kubernetes to make that \"perfect\" world a reality, but as multicloud technology continues to evolve, Kubernetes will continue to be at its core.\n\nTo learn more about how the team at Google is investing in Kubernetes and multicloud, watch the full presentation below.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/ArQL05VZ18U\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nCover image by Francisco Delgado on [Unsplash](https://unsplash.com/s/photos/multi-cloud?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[9,859],{"slug":1931,"featured":6,"template":734},"kubernetes-and-multicloud","content:en-us:blog:kubernetes-and-multicloud.yml","Kubernetes And Multicloud","en-us/blog/kubernetes-and-multicloud.yml","en-us/blog/kubernetes-and-multicloud",{"_path":1937,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1938,"content":1944,"config":1949,"_id":1951,"_type":14,"title":1952,"_source":16,"_file":1953,"_stem":1954,"_extension":19},"/en-us/blog/kubernetes-chat-with-joe-beda",{"title":1939,"description":1940,"ogTitle":1939,"ogDescription":1940,"noIndex":6,"ogImage":1941,"ogUrl":1942,"ogSiteName":720,"ogType":721,"canonicalUrls":1942,"schema":1943},"Kubernetes and the open source community: We chat with Joe Beda","Our CEO sits down with Kubernetes co-creator Joe Beda to talk about the future of open source.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680604/Blog/Hero%20Images/tech-explorers-cover.png","https://about.gitlab.com/blog/kubernetes-chat-with-joe-beda","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Kubernetes and the open source community: We chat with Joe Beda\",\n        \"author\": 
[{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-05-20\",\n      }",{"title":1939,"description":1940,"authors":1945,"heroImage":1941,"date":1946,"body":1947,"category":729,"tags":1948},[772],"2019-05-20","\n\nJoe Beda is the Principal Engineer at VMWare and co-creator of Kubernetes. Beda and Craig McLuckie’s Google project to build a container orchestration tool has exploded and Kubernetes is now a large, open source community with thousands actively contributing to the project thanks to the [Cloud Native Computing Foundation](https://cncf.io/). In the world of open source they don’t get much better than Joe Beda, which is why we were thrilled to speak with him as part of our TechExplorers series where we sit down with the industry’s tech leaders.\n\nJoe and GitLab CEO [Sid Sijbrandij](/company/team/#sytses) went over a variety of topics like cloud native, Kubernetes, the business of open source, and many others. What was most interesting, but not surprising, was the integral role the open source community had in the success of these projects.\n\n“I think open source is evolving… It’s never been something that’s sat still. One of the lessons from Kubernetes more than anything else is that open source today is about community, if not more than code,” Beda says. He admits that right now is a tumultuous time for open source, with the line between product and project getting blurred. The “business” of open source can sometimes alienate the community that supported these initiatives in the first place, something many leaders will have to navigate in the years ahead.\n\n“It’s like there’s the code and the license for the code, and then there’s the community that builds around it. And even if it’s not a legal contract, I think there’s a social contract between the leaders of an open source project and the people who are members of that community. 
And I think you have to be very respectful of that social contract.”\n\nOne of the most important things an open source project can do to maintain the trust of the community, according to Beda, is to be very explicit about its motivations from the beginning. At GitLab, we’ve taken this message to heart and have [our promises to the open source community](https://handbook.gitlab.com/handbook/company/stewardship/) public on our website.\n\nKubernetes has already made a major impact on the way we deploy applications, and users continue to contribute and add to the project. “I think I’m still blown away with just the diversity of the projects that are building on top of Kubernetes,” he says. Even with recent challenges, Beda’s encouraged at the innovation he continues to see in open source. It all boils down to buy-in from the community and giving them the tools to keep innovating. “I think this is part of the excitement... There is a really vibrant set of projects that are experimenting, trying things out. And it’s going to be the users who decide what’s successful here.”\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/6IlyxHFedpo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nVideo directed and produced by [Aricka Flowers](/company/team/#arickaflowers)\n{: .note}\n\n\n## Takeaways\n\n\n### On the future of open source:\n\n>“I think open source is evolving… It’s never been something that’s sat still. 
One of the lessons from Kubernetes more than anything else is that open source today is about community, if not more than code.”\n\n\n### On building an open source company:\n\n>“My advice to anybody who is building a company around open source is to understand sort of where are your levers, where is the value that you’re adding, and try and be creative about finding ways to add value where something like this can’t happen.”\n\n\n### On the early days of Kubernetes:\n\n>“The real story is that there was a set of us that just wanted to be able to hack on some stuff and not have to go through all the process of shipping stuff to Google… But also we very much had the idea from the start that we wanted to build a community. We wanted to enable other people to own it, to be part of it, to really feel like they were instrumental in making it happen. And that’s what happened.”\n\n\n### On enterprise cloud adoption:\n\n>“I think that as we start to see these enterprises start to adopt cloud, understanding the power dynamics and the relationship with cloud, I think that there is a lot of concern about how do I get some independent advice, independent thought, independent support that’s going to actually stay with me as I figure out where my position lands as I move from on-prem to cloud and beyond.”\n\nWe’ll be at KubeCon Barcelona May 20 – 23, booth #S21. 
Learn how you can get started with GitLab and Kubernetes, and be sure to check out Joe Beda’s keynote on May 21.\n",[563,9,859],{"slug":1950,"featured":6,"template":734},"kubernetes-chat-with-joe-beda","content:en-us:blog:kubernetes-chat-with-joe-beda.yml","Kubernetes Chat With Joe Beda","en-us/blog/kubernetes-chat-with-joe-beda.yml","en-us/blog/kubernetes-chat-with-joe-beda",{"_path":1956,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1957,"content":1962,"config":1967,"_id":1969,"_type":14,"title":1970,"_source":16,"_file":1971,"_stem":1972,"_extension":19},"/en-us/blog/kubernetes-chat-with-kelsey-hightower",{"title":1958,"description":1959,"ogTitle":1958,"ogDescription":1959,"noIndex":6,"ogImage":1941,"ogUrl":1960,"ogSiteName":720,"ogType":721,"canonicalUrls":1960,"schema":1961},"Kubernetes and the future of cloud native: We chat with Kelsey Hightower","Our CEO sits down with Google Staff Developer Advocate Kelsey Hightower to talk fundamentals, the future of cloud native, and Kubernetes.","https://about.gitlab.com/blog/kubernetes-chat-with-kelsey-hightower","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Kubernetes and the future of cloud native: We chat with Kelsey Hightower\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-05-13\",\n      }",{"title":1958,"description":1959,"authors":1963,"heroImage":1941,"date":1964,"body":1965,"category":729,"tags":1966},[772],"2019-05-13","\n\n[Kelsey Hightower](https://twitter.com/kelseyhightower) is a Staff Developer Advocate at Google, co-chair of KubeCon, the largest Kubernetes conference, and an avid open source technologist. 
Naturally, we couldn’t think of a better first subject for TechExplorers, a new blog series where we talk to the industry’s tech leaders.\n\nGitLab CEO [Sid Sijbrandij](/company/team/#sytses) sat down with Kelsey to talk about a variety of topics like cloud native, Kubernetes, infrastructure challenges, understanding new technology, and much more. One topic that came up again and again was fundamentals. Even with so many new technologies and methodologies out there – Kubernetes, [serverless](/topics/serverless/), cloud native – the basics of computing remain the same. It’s only when we understand the fundamentals and commit to building reliable code that we can make the most of these new platforms.\n\nOne of the biggest challenges Kelsey sees is the “all-or-nothing” approach. “Either I’m all serverless, or I’m all Kubernetes, or I’m all traditional infrastructure. That has never made sense in the history of computing,” he says. Ultimately, you don’t have to choose: Pick the platforms that work best for the job.\n\nGoing forward, Kelsey hopes that development continues to focus on high-level interfaces and hide the infrastructure underneath. Organizations want to have as little interaction with servers as possible. “That is what we’re trying to do. Anything more than that is noisy, and it’s kind of serving our own self-interest … We need those creative people not to be wasting time trying to build up a cloud platform before they can solve real problems.”\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/9OHNejqXOoo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nVideo directed and produced by [Aricka Flowers](/company/team/#arickaflowers)\n{: .note}\n\n## Takeaways\n\n### On early Kubernetes:\n\n>\"... 
When it first came out, just based on my previous experience as a system administrator, this is the thing you’re trying to build all those years. So, when I saw it, I immediately knew this thing solves my problems. So, I think I kind of attacked it as a contributor first. And someone who wanted to teach other people what I saw in it. Not sure if it was ever going to blow up or not. But it definitely had the right footprint when it came out.\"\n\n### On teaching others:\n\n>\"I usually try to explain things based on the fundamentals, and then break down the technology until we get to the bottom. So, whenever something new comes out, my guess is it’s not going to change how we do computing. That hasn’t changed in a long time ... Once you learn the three, four, five basic fundamentals, then you just look at the new technology, and you just work your way down.\"\n\n### On invisible infrastructure:\n\n>\"Forever, people have tried to build a thing where most of the organization **doesn’t think about servers**. So whether you’re using Kubernetes, or virtualization for that matter, the whole goal is that if I check in code, there should be very little interaction with infrastructure to get that deployed to customers. To me, serverless is just a reminder to us that we should focus on a high-level interface and hide the various infrastructure underneath.\"\n\n### On adopting cloud native platforms:\n\n>\"If you take your app that you wrote 20 years ago and neglect it all this time, you don’t have any of those kind of controls, and you just move that app into the cloud native type of design patterns, it’s going to be worse than what you had before … People have to understand that there’s tradeoffs. You’re going to have to _write more reliable code_ if you expect to be able to adopt these platforms.\"\n\n### On monoliths:\n\n>\"There’s nothing wrong with monoliths, honestly. People have gotten themselves in a spot where they can’t really update the code. It’s messy. 
The codebase is all over the place. And if you take that same mentality to functions, you’re just going to have a mess of functions that are going to be all over the place and not even know how to call them.\n\n>\"_Discipline is required no matter what the platform is._ People think platform will absolve them from discipline.\"\n",[563,9,859],{"slug":1968,"featured":6,"template":734},"kubernetes-chat-with-kelsey-hightower","content:en-us:blog:kubernetes-chat-with-kelsey-hightower.yml","Kubernetes Chat With Kelsey Hightower","en-us/blog/kubernetes-chat-with-kelsey-hightower.yml","en-us/blog/kubernetes-chat-with-kelsey-hightower",{"_path":1974,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1975,"content":1981,"config":1986,"_id":1988,"_type":14,"title":1989,"_source":16,"_file":1990,"_stem":1991,"_extension":19},"/en-us/blog/kubernetes-kubecon-barcelona",{"title":1976,"description":1977,"ogTitle":1976,"ogDescription":1977,"noIndex":6,"ogImage":1978,"ogUrl":1979,"ogSiteName":720,"ogType":721,"canonicalUrls":1979,"schema":1980},"See you at KubeCon Barcelona!","We're excited to see you all in Barcelona! Visit us at booth S21.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664107/Blog/Hero%20Images/tanuki-adventure.png","https://about.gitlab.com/blog/kubernetes-kubecon-barcelona","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"See you at KubeCon Barcelona!\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Priyanka Sharma\"}],\n        \"datePublished\": \"2019-05-17\",\n      }",{"title":1976,"description":1977,"authors":1982,"heroImage":1978,"date":1983,"body":1984,"category":729,"tags":1985},[1079],"2019-05-17","\nKubeCon is here again! I am very excited to go to Barcelona and meet (some of) the 12,000 attendees expected at the show. I’ve been part of KubeCon since the second event when there were 700 attendees. 
That year, we were a cozy community with about five projects, and Kubernetes was the newest game in town. Fast forward to today and I now serve on the board of the CNCF, Kubernetes is a stable technology, the foundation hosts 36 projects, and the latest of them to graduate will be Fluentd (after Kubernetes, Prometheus, CoreDNS, Envoy, and Containerd). I can’t quite reveal it yet, but there will be a very cool GitLab story intertwined with one of the projects that you will see for yourself soon :-).\n\n\u003Cscript type=\"text/javascript\" src=\"https://ssl.gstatic.com/trends_nrtr/1754_RC01/embed_loader.js\">\u003C/script> \u003Cscript type=\"text/javascript\"> trends.embed.renderExploreWidget(\"TIMESERIES\", {\"comparisonItem\":[{\"keyword\":\"kubernetes\",\"geo\":\"\",\"time\":\"today 5-y\"}],\"category\":0,\"property\":\"\"}, {\"exploreQuery\":\"date=today%205-y&q=kubernetes\",\"guestPath\":\"https://trends.google.com:443/trends/embed/\"}); \u003C/script>\n*\u003Csmall>Kubernetes growth over the past 5 years.\u003C/small>*\n\nAs some of you know, I joined GitLab after following the company and our CEO, Sid Sijbrandij, for a long time. Working at this dynamic company has been a ride of a lifetime. I am an open source person and one of the interesting things for me is how the [GitLab story](/company/history/) is similar to the Kubernetes story. GitLab started as an open source git provider because our co-founder, [Dmitriy \"DZ\" Zaphorozhets](/company/team/#dzaporozhets) didn’t like his options. Today, we have morphed into a [single application for the entire DevOps lifecycle](/stages-devops-lifecycle/). Similarly, Kubernetes comes from humble beginnings. In the words of Joe Beda, co-founder of Kubernetes, “there were a set of us that just wanted to be able to hack on some stuff and not have to go through all the process of shipping stuff to Google...it was more important for us to sort of reset the playing field between clouds. 
And so Kubernetes became a way for us to start doing that.”\n\nIt’s exciting to watch Kubernetes grow into the default container orchestration platform but I believe the best is yet to come: When the technology truly shifts left and every developer has access to it. That’s where GitLab comes in. With its deep focus on the developer workflow, the product brings efficiency, collaboration, and governance to teams sprawling the world wide web (a la GitLab itself) or small groups working out of a garage. When everything’s in the MR, everything is accessible including details on your Kubernetes pods. I invite you to learn more about how we [integrate with Kubernetes](/solutions/kubernetes/).\n\n> “The only way in my opinion to make it easier for most end users to have a \"cloud-native\" experience is to provide a more end-to-end platform, a way that people can come together and they can edit code and review code and then actually do CI on that code and get that code shipped out to containers and have it be run with appropriate load balancing and observability.” — Matt Klein, Systems Engineer at Lyft\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/w0cZuG2Fcwo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n*\u003Csmall>Video directed and produced by [Aricka Flowers](/company/team/#arickaflowers).\u003C/small>*\n\n## Let's connect!\n\n[Meet us at booth S21](https://about.gitlab.com/events/) for CI office hours, tanuki adventures, and iPad giveaways!\n\nI'd love to help any CNCF projects (and other folks!) consider [GitLab CI](/solutions/continuous-integration/). 
If you are interested, [DM me on Twitter](https://twitter.com/pritianka) and we can sit down and discuss.\n\n## Join us for these events\n\n### Monday, May 20\n\n#### Cloud-Native Transformation Summit Hosted by Sysdig | 9:00 am - 12:15 pm\n\nJoin Priyanka Sharma, Director of Technical Evangelism at GitLab, at this zero day KubeCon event. This event will look at how enterprise organizations are moving into production-level Kubernetes and transforming their applications and infrastructure operations into Cloud-Native technologies.\n[Learn more here](https://go.sysdig.com/cloud_native_transformation_summit_2019.html).\n\n#### Zero Trust in the Cloud Native Era at Cloud Native Security Day | 11:00 - 11:30 am\n\nPriyanka Sharma, Director of Technical Evangelism at GitLab covers zero trust in the era of cloud native. [Register here](https://go.twistlock.com/cloudnativesecurityday#agenda).\n\n#### The Future of CI/CD with Kubernetes | 2:40 - 3:20 pm\n\nJoin Dan Lorenc, Software Engineer at Google, Carlos Sanchez, Principal Software Engineer at CloudBees, and Priyanka Sharma, Director of Technical Evangelism at GitLab, and Rob Zuber, CTO at CircleCI for a discussion on the future of CI/CD with Kubernetes.[Learn more here](https://sched.co/N6FQ).\n\n#### Barcelona Free Software Meetup: Working in the Open with GitLab, Kubic with openSUSE | 7-9 pm\n\nJoin Jason Plum, a Senior Software Engineer, Distribution at GitLab, for a talk on GitLab’s open-core product. He’ll discuss contributing back to the community directly, as well as sharing insights on changing from Closed to Open.\n[RSVP here](https://www.meetup.com/Barcelona-Free-Software/events/260656266/).\n\n### Tuesday, May 21\n\n#### Tutorial: Cloud-Agnostic Serverless - Sebastien Goasguen, TriggerMesh & Priyanka Sharma, GitLab | 11:05 am - 12:30 pm\n\nIn this tutorial, we will leverage Knative, Google's Kubernetes-based open source platform to build, deploy, and manage modern serverless workloads. 
We will push serverless functions and apps to production on any cloud of choice and switch the provider as necessary. We will leverage GitLab and TriggerMesh technology in the tutorial and also share how developers can use other options.\nSign up for the tutorial through the KubeCon schedule [here](https://sched.co/MPgx).\n\n#### Multicloud 360 Event | 8:30 pm - Midnight\n\nJoin GitLab, Upbound, DigitalOcean, Google Cloud and CockroachDB for 360 views of Barcelona and a discussion of multicloud. [RSVP here](https://www.eventbrite.com/e/multicloud-360-tickets-60623662005) to reserve your spot.\n\n### Wednesday, May 22\n\n#### The Serverless Landscape and Event Driven Futures - Dee Kumar, Linux Foundation & Priyanka Sharma, GitLab | 2:00 -2:35 pm\n\nThere is a lot of curiosity and confusion around [serverless computing](/topics/serverless/). What is it? Who is it for? Is it a replacement for IaaS, PaaS, and containers? Does that mean the days of servers are over? The CNCF created the Serverless Working Group to explore the intersection of cloud native and serverless technology. [Learn more here](https://sched.co/MPeI).\n\n## Play #tanukiadventure\n\nJoin our #tanukiadventure! Grab your game card at our booth S21 to help guide your adventure in finding GitLab's partners. At each adventure stop, learn how they work with GitLab! Once complete, each partner will provide you with an exclusive GitLab collectible pin to celebrate our awesome partnership! 
The first 50 attendees to collect all 8 unique Tanuki pins will win our prized GitLab Tanuki hoodie!\n",[1124,9,278,859,731],{"slug":1987,"featured":6,"template":734},"kubernetes-kubecon-barcelona","content:en-us:blog:kubernetes-kubecon-barcelona.yml","Kubernetes Kubecon Barcelona","en-us/blog/kubernetes-kubecon-barcelona.yml","en-us/blog/kubernetes-kubecon-barcelona",{"_path":1993,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":1994,"content":2000,"config":2005,"_id":2007,"_type":14,"title":2008,"_source":16,"_file":2009,"_stem":2010,"_extension":19},"/en-us/blog/kubernetes-terminology",{"title":1995,"description":1996,"ogTitle":1995,"ogDescription":1996,"noIndex":6,"ogImage":1997,"ogUrl":1998,"ogSiteName":720,"ogType":721,"canonicalUrls":1998,"schema":1999},"Understand Kubernetes terminology from namespaces to pods","Kubernetes can be a critical piece of successful DevOps but there's a lot to learn. We explain the terms and share a hands-on demo.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670635/Blog/Hero%20Images/kubernetesterms.jpg","https://about.gitlab.com/blog/kubernetes-terminology","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Understand Kubernetes terminology from namespaces to pods\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2020-07-30\",\n      }",{"title":1995,"description":1996,"authors":2001,"heroImage":1997,"date":2002,"body":2003,"category":815,"tags":2004},[934],"2020-07-30","\n\n_If you're brand new to Kubernetes, you'll want to start with our [Kubernetes 101 guide](/blog/kubernetes-101/)._\n\nKubernetes and containers are often seen as two key elements in a [successful DevOps practice](/topics/devops/). But there's no question that Kubernetes can be intimidating to those not familiar with it. 
In fact, our [2020 Global DevSecOps Survey](/developer-survey/) found just 38% of respondents are actively using Kubernetes today while 50% are not. Anecdotally though, interest in Kubernetes is very high:\n\n_\"We are on the path to get our monolithic server into a set of microservices and the goal is to use Kubernetes to help on this side.\"_\n\n_\"We're trying to get there.\"_\n\n_\"It's a priority for our platform team.\"_\n\nThis past spring staff distribution engineer [Jason Plum](/company/team/#WarheadsSE) and senior distribution engineer [Gerard Hickey](/company/team/#ghickey) walked attendees at GitLab's company-wide meeting Contribute through something they called _Kubernetes 102_ that looked at the practical building blocks required for a cloud-native application on [Kubernetes](https://kubernetes.io). As Jason puts it in the [video](https://www.youtube.com/watch?v=jdKXBJLHP8I&feature=emb_title), \"what we're trying to do here is to not just say, 'Look at all the magic we do' but actually explain the things we're doing right.\" Although this was a \"laptops out\" demo, here's a look at the key concepts and Kubernetes terminology you'll need to understand followed by a link to the entire presentation if you'd like to dive right in.\n\n## Start with containers\n\nA container is not a jail, but a jail is a container, Jason explains. \"A container is a way of packaging an application so that it is portable. It's contained, hence (the term) 'container' and it's immutable. It's the runtime requirements to actually execute and package that up in an immutable form that you can hand to someone.\"\n\nBut containers can have a tendency to get out of hand so you need something to help keep track. That's where Kubernetes comes in, Jason says in the presentation. \"So what is Kubernetes at a high level? I've seen orchestrator, I've seen management system and I've seen coordinator. 
Kubernetes is all of those things.\"\n\nKubernetes weaves both containers and software-defined networking together, creating \"a platform you can deploy onto with a clear syntax,\" Jason says. \"That syntax is replicable and not vendor bound so that you can deploy it anywhere that supports the official behaviors. Its job is to start containers, keep them running and make sure they're still running. That's what its job is really about.\"\n\n## Unpacking the moving parts\n\nIf you want to get more familiar with Kubernetes, it helps to understand the unique terminology, Jason stresses. Here are key terms that will help to explain the processes involved in running Kubernetes:\n\n**Namespaces**: In Kubernetes, [the namespaces](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/) is effectively your working area. It's like a project in GCP or a similar thing in AWS.\n\n**Pods**: [A pod](https://kubernetes.io/docs/concepts/workloads/pods/) is effectively a unit of work. It is a way to describe a series of containers, the volumes they might share, and interconnections that those containers within the pod may need. You can have a pod that has a single container in it (or more than one container). Pods are flexible, too: Update one and it becomes version two, and version one is taken out, giving you a rolling update. As Jason spells out, \"It gives us a way to say, 'I always want to have three and still be able to migrate an application live from one version to another version without having downtime.'\n\n**Service**: Kubernetes \"has a concept of [a service](https://kubernetes.io/docs/concepts/services-networking/service/),\" Jason says. \"It can be thought of as like a load balancer for pods. 
It knows which pods are alive, healthy, and ready to respond so that when we try to access whatever pod we want to get to instead of to connect to the deployment and getting the one we get, and then always asking that pod for work.\"\n\n**Ingress**: This works with the service to make sure everything ends up in the right place. [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) can also provide load balancing.\n\n**ConfigMaps**: This is an API object for storing information in key-value pairs. \"A [ConfigMap](https://kubernetes.io/docs/concepts/configuration/configmap/) is very useful for doing things like pre-stashing environment variables or files that can actually be mounted directly into pods without actually having to have an actual file system somewhere,\" Jason says, adding that they're not meant for confidential data.\n\n**Secrets**: [Secrets](https://kubernetes.io/docs/concepts/configuration/secret/) are an object and a place to store confidential information as the name implies.\n\nNow that you have the Kubernetes terminology down, watch the entire presentation here:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/jdKXBJLHP8I\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n**Read more about Kubernetes**:\n\n* [Keep your Kubernetes runners moving](/blog/best-practices-for-kubernetes-runners/)\n\n* Set up GitLab CI/CD on [Google Kubernetes Engine](/blog/gitlab-ci-on-google-kubernetes-engine/) in 15 minutes!\n\n* Create a [Kubernetes cluster](/blog/gitlab-eks-integration-how-to/) on Amazon EKS\n\nCover image by [Matti Johnson](https://unsplash.com/@matti_johnson) on [Unsplash](https://unsplash.com)\n{: .note}\n\n## Read more on Kubernetes:\n\n- [How to install and use the GitLab Kubernetes Operator](/blog/gko-on-ocp/)\n\n- [Threat modeling the Kubernetes Agent: from MVC to continuous 
improvement](/blog/threat-modeling-kubernetes-agent/)\n\n- [How to deploy the GitLab Agent for Kubernetes with limited permissions](/blog/setting-up-the-k-agent/)\n\n- [A new era of Kubernetes integrations on GitLab.com](/blog/gitlab-kubernetes-agent-on-gitlab-com/)\n\n- [What we learned after a year of GitLab.com on Kubernetes](/blog/year-of-kubernetes/)\n",[859,563,9],{"slug":2006,"featured":6,"template":734},"kubernetes-terminology","content:en-us:blog:kubernetes-terminology.yml","Kubernetes Terminology","en-us/blog/kubernetes-terminology.yml","en-us/blog/kubernetes-terminology",{"_path":2012,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2013,"content":2019,"config":2026,"_id":2028,"_type":14,"title":2029,"_source":16,"_file":2030,"_stem":2031,"_extension":19},"/en-us/blog/leah-petersen-user-spotlight",{"title":2014,"description":2015,"ogTitle":2014,"ogDescription":2015,"noIndex":6,"ogImage":2016,"ogUrl":2017,"ogSiteName":720,"ogType":721,"canonicalUrls":2017,"schema":2018},"From motorcycle stunter to DevOps: Finding love for CI/CD","Switching to GitLab helped a newly minted DevOps engineer grasp the concept of CI/CD.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663760/Blog/Hero%20Images/image-for-leah-post.jpg","https://about.gitlab.com/blog/leah-petersen-user-spotlight","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Motorcycle stunter turned DevOps engineer says GitLab helped her learn to \"love\" CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Aricka Flowers\"}],\n        \"datePublished\": \"2018-06-21\",\n      }",{"title":2020,"description":2015,"authors":2021,"heroImage":2016,"date":2023,"body":2024,"category":815,"tags":2025},"Motorcycle stunter turned DevOps engineer says GitLab helped her learn to \"love\" CI/CD",[2022],"Aricka Flowers","2018-06-21","\nWhen professional motorcycle stuntwoman turned developer Leah Petersen 
switched from Jenkins to GitLab, she was a bit nervous to say the least. Having only worked in tech for nine months, the [Samsung SDS](https://www.samsungsds.com/us/en/index.html) engineer was not enthused about the prospect of having to learn a new application after feeling like she had “just started to get competent” with Jenkins.\n\nAfter a self-described mini pity party, she dove into GitLab head first, jumping into a few big ticket projects to get a handle on the landscape. Within a few short months, Petersen was so impressed by her GitLab CI/CD experience that she felt the need to shout her newfound “love” for continuous integration and continuous delivery from the virtual mountaintop of [her blog](https://leahnp.github.io/2018/moving-from-jenkins-to-gitlab-CI/).\n\nWe recently met up with Petersen to learn more about her transition to the tech world and experience with GitLab.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/Avx_RftRT_o\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### Q & A with Leah Petersen, DevOps Engineer\n\n**Where do you work and what does your team do?**\n\nI work for a team in Samsung SDS called the Cloud Native Computing Team, and I'm [a DevOps engineer](https://about.gitlab.com/topics/devops/what-is-a-devops-engineer/). We deal primarily with containers in Kubernetes and helping companies modernize and move to the cloud. My team is super unique. We were kind of treated like an incubated startup within Samsung, so we're really given a lot of autonomy to make our own decisions.\n\nOur team was put together about five years ago, and Samsung really made a bet on Kubernetes being the future of orchestrating huge workloads in the cloud. 
Initially, we were focusing mainly on research and development, contributing to the Kubernetes community and learning who was a part of it, what their motives were, and how we could find our place in it. Over the last year, Samsung has really pivoted our role in the company, and we're looking at how we can help Samsung as a global organization move to Kubernetes and containers.\n\n**Where did you work before Samsung?**\n\nI was a motorcycle stunt rider before I became an engineer, and that career kind of organically grew out of my passion for motorcycles. I started stunting, loved the community and was able to meet people all over the country and travel. Being one of the few women who did it, I organically started getting calls for jobs and gigs. I thought, “If I can do this in my 20s and make this my full-time career, I'm definitely going to take a shot at it,” so I did.\n\nIt was an amazing opportunity and experience to travel the world and meet people all over this planet who are passionate about this crazy thing that I'm also passionate about. And I got to work with a lot of amazing brands and raise awareness about the sport that I love. So, I don't have any regrets about that and cherish the time that I got to spend on a motorcycle professionally.\n\n**How did you move from being a professional motorcycle stunter to a DevOps engineer?**\n\nI had been looking for a new career path and wasn't really sure what I was going to do. I knew that I wanted to build some tangible skills. I wanted skills that had a clear market value, and tech definitely provides that.\n\nI ended up taking an online coding course in Python, and had this “aha” moment where I realized, not only can I do this, which I didn't think was previously possible, but it's fun; I really like solving these problems. At that point I started taking more online courses and learning as much as I could for free. 
Then I ended up finding [Ada Developers Academy](https://www.adadevelopersacademy.org/), and that was the perfect segue into the industry.\n\n> I had this “aha” moment where I realized, not only can I do this, which I didn't think was previously possible, but it's fun\n\n**Can you describe how your experience has been as a woman in tech?**\n\nYou definitely get a lot of strange reactions being a woman in tech. Walking into a situation, oftentimes people are surprised you're an engineer. You'll get reactions like, “Oh, I thought you were a project manager,” or, “I thought you were a recruiter,” or whatever other stereotype that you brought into the room. That can be discouraging and makes you feel unwelcome in that space. But I think we need women in every part of tech: frontend, backend, DevOps, operations, everything. If your interest is in UX, go for that. But don't let all the men who've been in the industry for 25 years on the operations side of things scare you off either. I really think we need diverse minds and approaches to problems in the whole spectrum of it.\n\nSometimes I forget about the gender disparity in tech because my team, specifically, has a couple of really amazing women who I get to work with every day. So, I'm very fortunate. But I recently went to KubeCon in Copenhagen, and it's an amazing conference with so much energy, but it's a real wake up call when you see the gender disparity there. There's 4,000 guys walking around and you feel like you stick out [or] when you're sitting in an auditorium, look around and realize, “Oh, I'm the only lady here.” It's something that you can't look away from.\n\n**Why did you decide to go into DevOps engineering?**\n\nIn my boot camp classes we were focusing on web development and building Ruby on Rails and Node.js apps. We each had an opportunity to do an internship at companies in Seattle that support the Ada program. 
Samsung was one of them, and they came in to do a presentation about their involvement in open source and Kubernetes. I had no idea what they were talking about, but Kubernetes and the momentum of the open source community was really appealing to me. So I took a chance and picked Samsung, dove right in, and found my way as I went along. I'm really happy that I chose Kubernetes and to specialize in the cloud.\n\n>Kubernetes and the momentum of the open source community was really appealing to me. So I took a chance, dove right in, and found my way as I went along\n\n**How did you get started with GitLab CI/CD? And how would you describe your transition to the application?**\n\nI always felt like I was fighting with the CI platform we were on prior to GitLab. It was never really functioning how we wanted it to, and something was always kind of failing. The whole reason you have CI/CD is to get visibility into what's happening with your code, right? You want to run your code through this pipeline and make sure there are no bugs, that you’re packaging it correctly and putting it in the places that you need it to be in production. It's this hugely critical component of going from the developer's computer to the world; that's the pipeline. So you really need the visibility to see what is happening every step of the way.\n\nOn the old system, I felt that I just didn't have that visibility. I was digging for the problems and not able to understand where they were coming from, where they were originating from, why they were happening or how to fix them. I feel like GitLab definitely does a great job of assisting the user in finding the origin of a problem, tracing that step back and making it clear where your issues are and when you're having success.\n\n**How has using GitLab impacted your career and workflow?**\n\nThere's a lot of talk about accessibility and user experience in tech. 
And we all know what it's like to have a bad user experience with a piece of technology; it's the most frustrating thing in the entire world. As a developer, you deal with lots of different tech every single day. When I started using GitLab about a year and a half into my career, it was certainly the first platform where I was like, ‘I feel so at home here. Everything’s fluid. I can find where everything is. I understand what everything is.’ There aren't these big black holes of confusion that have me asking, “Why does this exist and what am I doing here?’”\n\nWith GitLab, everything is just this cheery, happy place. And I really appreciate how it has now set the bar for me when it comes to the way in which a technology should function when I’m working with it.\n\nCover photo by [Rendiansyah Nugroho](https://unsplash.com/photos/JUePy_-uOSI) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[1634,859,9,731,1792,268,563,1041],{"slug":2027,"featured":6,"template":734},"leah-petersen-user-spotlight","content:en-us:blog:leah-petersen-user-spotlight.yml","Leah Petersen User Spotlight","en-us/blog/leah-petersen-user-spotlight.yml","en-us/blog/leah-petersen-user-spotlight",{"_path":2033,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2034,"content":2039,"config":2044,"_id":2046,"_type":14,"title":2047,"_source":16,"_file":2048,"_stem":2049,"_extension":19},"/en-us/blog/live-from-commit-london",{"title":2035,"description":2036,"ogTitle":2035,"ogDescription":2036,"noIndex":6,"ogImage":744,"ogUrl":2037,"ogSiteName":720,"ogType":721,"canonicalUrls":2037,"schema":2038},"Live from Commit London","We're having a packed day at our first European user conference. 
Watch this space for the latest news.","https://about.gitlab.com/blog/live-from-commit-london","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Live from Commit London\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2019-10-09\"\n      }",{"title":2035,"description":2036,"authors":2040,"heroImage":744,"date":2041,"body":2042,"category":300,"tags":2043},[934],"2019-10-09","\n**9:30AM BST** – GitLab CEO [Sid Sijbrandij](/company/team/#sytses) told attendees at our first European user conference that support for Amazon Web Services' Elastic Kubernetes Service (EKS) will be available later this year. Sid also underscored the importance of the European market. Almost one-third of GitLab's business comes from Europe and 42% of our customers are based in Europe.\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n\u003Cblockquote class=\"twitter-tweet\" data-partner=\"tweetdeck\">\u003Cp lang=\"en\" dir=\"ltr\">Gitlab Commit London warming up with breakfast networking 🤜🏻💥🚀 cc \u003Ca href=\"https://twitter.com/gitlab?ref_src=twsrc%5Etfw\">@gitlab\u003C/a> \u003Ca href=\"https://twitter.com/hashtag/gitlabcommit?src=hash&amp;ref_src=twsrc%5Etfw\">#gitlabcommit\u003C/a> \u003Ca href=\"https://t.co/ke7nsNE7pO\">pic.twitter.com/ke7nsNE7pO\u003C/a>\u003C/p>&mdash; James McLeod (@mcleo_d) \u003Ca href=\"https://twitter.com/mcleo_d/status/1181849833604337667?ref_src=twsrc%5Etfw\">October 9, 2019\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n**10:00AM BST** – Speed matters, particularly at Porsche AG. 
Software engineers Alberto Gisbert and Dennis Menge told Commit 2019 attendees how a quest to improve collaboration, reduce tool complexity and achieve a single source of truth led the car manufacturer to GitLab. Porsche started using GitLab in Europe initially, but quickly realized it needed to expand to China, Porsche's largest market, as well. One year into the project, Porsche has more than 660 repositories with more than 250 active users. All told, more than 80,000 pipelines have been triggered.\n\nUp next, Capgemini UK's [Matt Smith](https://twitter.com/Harmelodic) shared how to go from [Zero to K8s: As Fast As Possible](https://gitlabcommit2019london.sched.com/event/UL5X/zero-to-k8s-as-fast-as-possible):\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n\u003Cblockquote class=\"twitter-tweet\" data-conversation=\"none\">\u003Cp lang=\"en\" dir=\"ltr\">Britney mic&#39;d up!\u003Cbr>\u003Cbr>On stage in half an hour 😬\u003Ca href=\"https://twitter.com/hashtag/GitLabCommit?src=hash&amp;ref_src=twsrc%5Etfw\">#GitLabCommit\u003C/a> \u003Ca href=\"https://t.co/ivQ1V9waBW\">pic.twitter.com/ivQ1V9waBW\u003C/a>\u003C/p>&mdash; Matt Smith (@Harmelodic) \u003Ca href=\"https://twitter.com/Harmelodic/status/1181851029048102912?ref_src=twsrc%5Etfw\">October 9, 2019\u003C/a>\u003C/blockquote> \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\nCoding in the blink of an eye!\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n\u003Cblockquote class=\"twitter-tweet\" data-partner=\"tweetdeck\">\u003Cp lang=\"en\" dir=\"ltr\">.\u003Ca href=\"https://twitter.com/Harmelodic?ref_src=twsrc%5Etfw\">@Harmelodic\u003C/a> is talking faster than \u003Ca href=\"https://twitter.com/hashtag/terraform?src=hash&amp;ref_src=twsrc%5Etfw\">#terraform\u003C/a> can deploy things :joy: Great live coding :sunglasses: \u003Ca 
href=\"https://twitter.com/hashtag/gitlabcommit?src=hash&amp;ref_src=twsrc%5Etfw\">#gitlabcommit\u003C/a> \u003Ca href=\"https://t.co/LS0t3GdqHx\">pic.twitter.com/LS0t3GdqHx\u003C/a>\u003C/p>&mdash; Michael Friedrich (@dnsmichi) \u003Ca href=\"https://twitter.com/dnsmichi/status/1181862263680053248?ref_src=twsrc%5Etfw\">October 9, 2019\u003C/a>\u003C/blockquote>\n\u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n**11:30AM BST** – How to shift left and bring security more firmly into development was the topic of a mid-morning panel discussion at Commit.\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n\u003Cblockquote class=\"twitter-tweet\" data-conversation=\"none\">\u003Cp lang=\"en\" dir=\"ltr\">\u003Ca href=\"https://twitter.com/Shetti?ref_src=twsrc%5Etfw\">@Shetti\u003C/a> of \u003Ca href=\"https://twitter.com/VMware?ref_src=twsrc%5Etfw\">@VMware\u003C/a> leads a panel discussion on security in the software development life cycle with Jeremy Guido, \u003Ca href=\"https://twitter.com/plafoucriere?ref_src=twsrc%5Etfw\">@plafoucriere\u003C/a> and \u003Ca href=\"https://twitter.com/simasotiris?ref_src=twsrc%5Etfw\">@simasotiris\u003C/a>.\u003Ca href=\"https://twitter.com/hashtag/GitLabCommit?src=hash&amp;ref_src=twsrc%5Etfw\">#GitLabCommit\u003C/a> \u003Ca href=\"https://twitter.com/hashtag/womenintech?src=hash&amp;ref_src=twsrc%5Etfw\">#womenintech\u003C/a> \u003Ca href=\"https://twitter.com/hashtag/womeninstem?src=hash&amp;ref_src=twsrc%5Etfw\">#womeninstem\u003C/a> \u003Ca href=\"https://twitter.com/gitlab?ref_src=twsrc%5Etfw\">@gitlab\u003C/a> \u003Ca href=\"https://twitter.com/hashtag/womenwhocode?src=hash&amp;ref_src=twsrc%5Etfw\">#womenwhocode\u003C/a> \u003Ca href=\"https://twitter.com/hashtag/git?src=hash&amp;ref_src=twsrc%5Etfw\">#git\u003C/a> \u003Ca href=\"https://twitter.com/hashtag/DevOps?src=hash&amp;ref_src=twsrc%5Etfw\">#DevOps\u003C/a> \u003Ca 
href=\"https://twitter.com/hashtag/opensource?src=hash&amp;ref_src=twsrc%5Etfw\">#opensource\u003C/a> \u003Ca href=\"https://twitter.com/hashtag/security?src=hash&amp;ref_src=twsrc%5Etfw\">#security\u003C/a> \u003Ca href=\"https://twitter.com/hashtag/sdlc?src=hash&amp;ref_src=twsrc%5Etfw\">#sdlc\u003C/a> \u003Ca href=\"https://t.co/lQeQYelTVv\">pic.twitter.com/lQeQYelTVv\u003C/a>\u003C/p>&mdash; Suze Shardlow at #GitLabCommit (@SuzeShardlow) \u003Ca href=\"https://twitter.com/SuzeShardlow/status/1181874495268773888?ref_src=twsrc%5Etfw\">October 9, 2019\u003C/a>\u003C/blockquote> \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\nSotiraki Sima, executive director in technology risk at Goldman Sachs, stressed the benefits of starting small and being prepared to continually adapt to new technologies and new tools. [Jeremy Guido](https://fr.linkedin.com/in/jeremyguido), backend engineer with My Data Models, said designating a security leader in a development team can help to make everyone feel more like a stakeholder. And [Philippe Lafoucriere](https://about.gitlab.com/company/team/#plafoucriere), distinguished engineer at GitLab, stressed the role of automation in scaling security throughout the SDLC. The bottom line: it's a process so take it a step at a time.\n\n**1:00PM BST** – What's next for the GitLab tool? [Eric Brinkman](/company/team/#ebrinkman), director of product, dev products, outlined our technology roadmap. He began with Meltano, a six-person startup located within GitLab that is focused on bringing DevOps best practices to DataOps. Eric announced that today [version 1.0 of Meltano](https://meltano.com/blog/meltano-graduates-to-version-1-0/) is available.\n\nAnd that was just the beginning. Value stream management will be coming soon to Manage, Eric said, so users will be able to track efficiency metrics and ultimately receive recommendations. 
Plan stage will add high and low release requirements related to code and test. In Create, our source code management and code review will get an upgrade with an improved Web IDE and eventually the ability to do live coding. Verify will receive load testing runs by default and Secure will get [fuzzing](/direction/secure/dynamic-analysis/fuzz-testing/) as a built-in part of security testing. Changes to Release will mean automatically staged rollbacks and Configure will invest in run books to improve mean time to recovery. Protect will continue to invest in real-time threat detection capabilities. And finally auto remediation is on the horizon so at some point the largely manual (and often annoying) job of finding and fixing vulnerabilities will be a thing of the past. \"This is something that can truly bring dev, sec and ops together,\" Eric said.\n\nNote: All sessions from Commit London are being recorded and will be available on our [YouTube channel](https://youtube.com/gitlab) in 24-48 hours.\n{: .alert.alert-info}\n",[268,278,1475,1250,1041,9],{"slug":2045,"featured":6,"template":734},"live-from-commit-london","content:en-us:blog:live-from-commit-london.yml","Live From Commit London","en-us/blog/live-from-commit-london.yml","en-us/blog/live-from-commit-london",{"_path":2051,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2052,"content":2058,"config":2064,"_id":2066,"_type":14,"title":2067,"_source":16,"_file":2068,"_stem":2069,"_extension":19},"/en-us/blog/microcks-and-gitlab-part-one",{"title":2053,"description":2054,"ogTitle":2053,"ogDescription":2054,"noIndex":6,"ogImage":2055,"ogUrl":2056,"ogSiteName":720,"ogType":721,"canonicalUrls":2056,"schema":2057},"Speed up API and microservices delivery with Microcks and GitLab - Part 1","Learn how to configure Microcks for GitLab and what the use cases are for this open source Kubernetes-native 
tool.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683021/Blog/Hero%20Images/lightsticks.png","https://about.gitlab.com/blog/microcks-and-gitlab-part-one","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Speed up API and microservices delivery with Microcks and GitLab - Part 1\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Madou Coulibaly\"}],\n        \"datePublished\": \"2023-09-27\"\n      }",{"title":2053,"description":2054,"authors":2059,"heroImage":2055,"date":2061,"body":2062,"category":752,"tags":2063},[2060],"Madou Coulibaly","2023-09-27","\n\nAPI development is all the rage these days for customer and partner integration, frontend-to-backend communication, microservices orchestration, and more. Yet APIs have their challenges, including how to create a fast feedback loop on design, how different teams can work with autonomy without having to wait for each other's API implementation, and how to cope with backward compatibility tests when shipping newer versions of the API. \n\n[Microcks](https://microcks.io), an open source, Kubernetes-native tool for API mocking and testing, addresses these challenges. With Microcks, which is accepted as a Sandbox project in the [Cloud Native Computing Foundation](https://cncf.io), developers can leverage their [OpenAPI](https://www.openapis.org/), [GraphQL](https://graphql.org/), [gRPC](https://grpc.io/), [AsyncAPI](https://www.asyncapi.com/), and [Postman Collection](https://www.postman.com/collection/) assets to quickly mock and simulate APIs before writing them. Couple Microcks with GitLab and you have a powerful combination to foster collaboration, encourage rapid changes, and provide a robust delivery platform for API-based applications.\n\nIn this ongoing blog series, we will introduce you to Microcks use cases and how they fit with the GitLab platform. 
We'll also discuss technical integration points that will help ease the developer burden, including identity management, Git repositories, and pipeline integrations.\n\n## What is Microcks?\nMicrocks addresses two major use cases: \n- **Simulating (or mocking) an API or a microservice** from a set of descriptive assets. This can be done as soon as you start the design phase to set up a feedback loop very quickly, or later on to ease the pain of provisioning environments with a lot of dependencies.\n- **Validating the conformance of your application regarding your API specification** by running contract-test. This validation can be integrated into your CI/CD pipeline so that conformance can be checked on each and every iteration. This is of great help to enforce backward compatibility of your API of microservices interfaces.\n\nMicrocks offers a uniform and consistent approach for the various kinds of request/response APIs (REST, GraphQL, gRPC, Soap) and event-driven APIs (currently supporting eight different protocols), thereby bringing consistency for users and for automations all along your API lifecycle.\n\n## How Microcks fits into the software development lifecycle\nMicrocks is a solution based on containers and can be deployed in several configurations. It can be deployed on the developer laptop through [Docker](https://microcks.io/documentation/installing/docker-compose/), [Podman](https://microcks.io/documentation/installing/podman-compose/) or [Docker Desktop Extension](https://microcks.io/documentation/installing/docker-desktop-extension/) to assist with mocking complex environments. 
When it comes to team collaboration, Microcks can be deployed as a centralized instance that connects to the Git repositories of the organization, discovers the API artifacts, and then provides shared up-to-date API simulations.\n\n![diagram of how Microcks fits into development lifecycle](https://about.gitlab.com/images/blogimages/2023-09-27-microcks-and-gitlab-part-1-speed-up-api-and-microservices-delivery/microcks.png){: .shadow.small.center}\n\nTo ease the burden on developers (and administrators), Microcks can be configured to use your GitLab platform as an identity provider. With that configuration, integrating Microcks is seamless, and API simulations are automatically shared among development teams. Microcks fosters collaboration by providing everyone with the same “source of truth” and avoiding drift risks. The tool can also be used to lower the pain and the cost of deploying and maintaining complex QA environments because simulations are inexpensive to deploy or redeploy on-demand. Microcks deployment follows a GitOps approach.\n\nBeyond this sharing of simulations, Microcks also integrates well with CI/CD pipelines. As you release API-based applications, there is always concern about conformance of the contractualized expectations you defined using specifications like OpenAPI, GraphQL, and the like. Usually, the hardest part isn't delivering the `1.0` of this API; problems come later when you're trying to deliver the `1.3`. This latest version must still be backward compatible with the 1.0 contract if you don't want to make your consumers angry and frustrated.\n\nThis conformance validation is very well assured by Microcks using contract-testing principles. 
So we encourage you to plug Microcks into some `test` related jobs in your GitLab pipeline and delegate this conformance validation to your Microcks instance.\n\n![microcks-in-gitlab-workflow](https://about.gitlab.com/images/blogimages/2023-09-27-microcks-and-gitlab-part-1-speed-up-api-and-microservices-delivery/microcks-in-gitlab-workflow.png){: .shadow.medium.center}\n\n\nEmbedding Microcks conformance testing in your pipeline is actually easy thanks to our lightweight CLI that you'll integrate in pipeline jobs. You can choose to reuse an existing Microcks instance to record results and keep history of your success or pop up a new ephemeral instance as it's lightweight and fast to bootstrap.\n\n## How to set up GitLab as an identity provider in Microcks\n\nTo start off this series, we will detail how to configure Microcks to use your GitLab platform as an identity provider. This is in fact very easy as authentication in Microcks is based on [Keycloak](https://keycloak.org) (another CNCF project) and GitLab can be set as an identity provider in Keycloak (see [official documentation](https://www.keycloak.org/docs/latest/server_admin/index.html#gitlab)).\n\n**Note:** This configuration is optional as Microcks can use any other identity provider Keycloak integrates with.\n\nKeycloak is a very common solution that may be deployed already at your organization. If not, Microcks comes with a Keycloak distribution that is pre-configured for its usage with a realm called `microcks`. 
We have used this realm to validate this configuration.\n\n### Create a GitLab Group Application\nThe first thing is to create a new [Group Application](https://docs.gitlab.com/ee/integration/oauth_provider.html#create-a-group-owned-application) on your GitLab instance as follows:\n- `Name`: `microcks-via-keycloak`\n- `Redirect URI`: `https://keycloak.acme.org/realms/microcks/broker/gitlab/endpoint`\n- `Scopes`: `read_user`, `openid`, `profile` and `email`\n\n![gitlab-application-form](https://about.gitlab.com/images/blogimages/2023-09-27-microcks-and-gitlab-part-1-speed-up-api-and-microservices-delivery/gitlab-application-form.png){: .shadow.medium.center}\n\n\nThis application uses your Keycloak instance with `https://keycloak.acme.org/realms/microcks/broker/gitlab/endpoint` as the redirect URI. As a result, we obtain an `Application ID` and an associated `Secret` we have to keep aside for the next step.\n\n![gitlab-application](https://about.gitlab.com/images/blogimages/2023-09-27-microcks-and-gitlab-part-1-speed-up-api-and-microservices-delivery/gitlab-application.jpeg){: .shadow.medium.center}\n\n\n### Add GitLab as identity provider in Keycloak\nThe next step takes place in the Keycloak admin console. Once the correct `microcks` realm is selected, you'll just have to go to the **Identity providers** section and add a GitLab provider. Simply paste here the `Application ID` you got earlier as `Client ID` and the `Secret` as `Client Secret`. 
You can also choose a `Display order` if you plan to have multiple identity providers.\n\n![keycloak-identity-provider](https://about.gitlab.com/images/blogimages/2023-09-27-microcks-and-gitlab-part-1-speed-up-api-and-microservices-delivery/keycloak-identity-provider.jpg){: .shadow.medium.center}\n\n\nThen, from the **Authentication** section in the admin console, choose the browser flow and configure the `Identity Provider Redirector` as follows:\n\n- `Alias`: `GitLab`\n- `Default Identify Provider`: `gitlab`\n\n![keycloak-redirector](https://about.gitlab.com/images/blogimages/2023-09-27-microcks-and-gitlab-part-1-speed-up-api-and-microservices-delivery/keycloak-redirector.jpg){: .shadow.medium.center}\n\n### Test your Microcks configuration\nNow open the Microcks URL into your browser and you'll be directly redirected to the GitLab login page. Enter your GitLab credentials and you will be authenticated and redirected to Microcks. \n\n![microcks-homepage](https://about.gitlab.com/images/blogimages/2023-09-27-microcks-and-gitlab-part-1-speed-up-api-and-microservices-delivery/microcks-homepage.jpeg){: .shadow.medium.center}\n\n## What's next?\nIn upcoming blogs, we'll detail how GitLab can be used in the two major use cases for Microcks. We'll see how Microcks integrates with GitLab Git repositories to discover API specifications and produce simulations, and how to integrate Microcks conformance tests into your GitLab CI/CD pipelines.\n\n_[Laurent Broudoux](https://www.linkedin.com/in/laurentbroudoux/) is a cloud-native architecture expert and enterprise integration problem lover. He has helped organizations in adopting distributed and cloud paradigms while capitalizing on their critical existing assets. He is the founder and lead developer of the [Microcks.io](https://microcks.io/) open-source project: a Kubernetes-native tool for API mocking and testing. 
For this, he is using his 10+ years experience as an architect in financial services where he defined API transformation strategies, including governance and delivery process._\n\n_[Madou Coulibaly](https://gitlab.com/madou) is a senior solutions architect at GitLab._\n",[1388,1476,109,9,232],{"slug":2065,"featured":6,"template":734},"microcks-and-gitlab-part-one","content:en-us:blog:microcks-and-gitlab-part-one.yml","Microcks And Gitlab Part One","en-us/blog/microcks-and-gitlab-part-one.yml","en-us/blog/microcks-and-gitlab-part-one",{"_path":2071,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2072,"content":2078,"config":2082,"_id":2084,"_type":14,"title":2085,"_source":16,"_file":2086,"_stem":2087,"_extension":19},"/en-us/blog/monitoring-team-update",{"title":2073,"description":2074,"ogTitle":2073,"ogDescription":2074,"noIndex":6,"ogImage":2075,"ogUrl":2076,"ogSiteName":720,"ogType":721,"canonicalUrls":2076,"schema":2077},"How we plan to build more observability tools on GitLab monitoring","Get the scoop on our plan to close the DevOps loop.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665484/Blog/Hero%20Images/monitoring-update-feature-image.jpg","https://about.gitlab.com/blog/monitoring-team-update","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we plan to build more observability tools on GitLab monitoring\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2019-08-29\"\n      }",{"title":2073,"description":2074,"authors":2079,"heroImage":2075,"date":1406,"body":2080,"category":300,"tags":2081},[855],"\nThe product team at GitLab is working to close the DevOps loop by accelerating development\non new monitoring products that will offer more observability into application performance and\nthe health of your deployments.\n\n## Where does monitoring fit into the DevOps 
lifecycle?\n\n[Monitoring is the final Ops stage of the DevOps loop](/direction/monitor/), coming up after the\nproduction environment is configured and the application deployed. No developer should really\nship code and forget it. Monitoring is essential to proactively respond to simple and complex\nproblems, and helps GitLab customers uphold the expectations outlined in their service\nlevel objectives (SLOs) with their users.\n\n## Our vision for monitoring at GitLab\n\nWe outlined big plans for [building out our Ops capabilities](/blog/gitlabs-2018-product-vision/) in our 2018 GitLab product vision:\n“A big milestone for GitLab will be when operations people log into GitLab every day and consider\nit their main interface for getting work done.”\n\nSince then, GitLab has been working diligently to build out our monitoring products to close the\nDevOps loop. The goal is to build instrumentation that allows developers to proactively identify\nSLO degradation and observe the impacts of code changes across multiple deployments in real-time.\nThe \"North Stars\" that guide product development in the monitoring stage include:\n\n*   **Instrument with ease**: GitLab is set up so teams have generic observability into their\napplication performance.\n*   **Resolve like a pro**: GitLab correlates incoming observability data with CI/CD events and\nsource code information so troubleshooting is easy.\n*   **Gain insights seamlessly**: Our use of container-based deployments make it simpler to\ncontinuously collect insights into production SLOs, incidents, and observability sources across\ncomplex projects and multiple applications.\n\nOne of our [core principles at GitLab is to dogfood everything](/direction/monitor/#dogfooding) —\nafter all, if it doesn’t work for us, how can it work for our customers? 
We begin by\nsetting up our own infrastructure teams at GitLab.com\n[to use the incident management system](https://gitlab.com/groups/gitlab-org/-/epics/1672)\nwe’re developing, and also building out GitLab self-monitoring\nso our administrators can monitor their self-managed GitLab instance the same way their\ndevelopers use GitLab to monitor their applications.\n\nWe also are committed to closing the DevOps loop by prioritizing cloud native first,\nand building tooling designed to provide more insight in to application performance and the\nhealth of deployments for Ops professionals.\n\n[Kenny Johnston](/company/team/#kencjohnston), director of product (Ops) at GitLab, gave me an\noverview of some of the new products the monitoring team is working on to help make this\nvision a reality. Watch the full video of our conversation below and check out\nthe [monitoring product roadmap](https://gitlab.com/groups/gitlab-org/-/roadmap?scope=all&utf8=%E2%9C%93&state=opened&label_name[]=devops%3A%3Amonitor)\nfor an in-depth look at our goals and timeline.\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/VFju_3R0hPg\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n## Building an observability suite to close out the DevOps loop\n\nThe top priority for the monitoring team is to close the DevOps feedback loop for GitLab customers.\nThis means that if SLOs are degraded in any way, an alert is triggered and an incident is created\nin GitLab allowing for an immediate response.\n\nOur priority product categories at this stage are metrics, cluster monitoring, and incident management,\nsays Kenny.\n\n“First I want to make sure that we can provide our customers with the instrumentation so that they\ncan define an SLO, and when their application exceeds or fails to achieve that SLO, that they can\nrespond in an instant,” says Kenny. 
“Once we have them doing that, we'll get a lot of good\nfeedback, and immediate feedback from users about what tools they need for diagnostic purposes.”\n\n## Measure your performance with enhanced metrics\n\nWe already have a [successful integration](https://docs.gitlab.com/ee/user/project/integrations/prometheus.html)\nwith open source metrics tool, Prometheus, which we use to collect and display performance metrics\nfor applications deployed on Kubernetes. The integration is sophisticated enough that developers\ndo not have to leave GitLab to collect important information on the impact of a merge request or\nto monitor production systems. Our product category for metrics is “viable,”  meaning customers\nare using the instrumentation we’ve developed to solve real problems, bringing us a step closer to\nclosing out the DevOps loop.\n\nDiagnostic tooling in product categories such as logging, tracing, and error tracking for monitoring\napplication performance (APM) is currently at the MVC stage, though the team has made plans to\n[accelerate development on logging](https://youtu.be/nB5KDY4nsFg) in future GitLab deployments.\n\nKenny notes that our observability suite is one of the primary ways GitLab provides value for\noperators that are thinking of making the move to cloud native.\n\n“GitLab out-of-the-box keeps up with new cloud native technologies because we're constantly\nadopting the newest versions, and our whole convention of configuration means we don't\nleave it to you to figure it out, we've figured it out for you as a default,” explains Kenny.\n\n## Simplify Kubernetes management using GitLab\n\nThere is quite a bit of overlap between product category metrics and cluster monitoring at this\nstage, as Prometheus is used to collect metrics on applications deployed using Kubernetes.\nBy offering out-of-the-box cluster monitoring on Kubernetes, we make it possible for operators\nto monitor the health of their deployed environments all in one place.\n\nOne 
of the [high-value cluster monitoring features](https://docs.gitlab.com/ee/user/project/clusters/#monitoring-your-kubernetes-cluster)\nwe’ve set up on GitLab is memory usage and capacity metrics (CPU) administration,\nso users can be automatically alerted if either of those numbers are out of bounds on their deployed environments.\n\n“We'd like to start adding capabilities for\n[cluster cost optimization](https://gitlab.com/gitlab-org/gitlab-ee/issues/11879), so\ninforming users not just when they're hitting capacity but when they're significantly under\ncapacity and should probably size down,” says Kenny. “That helps users who've configured a\nKubernetes cluster to not end up wasting it because it's being underutilized and not end up wasting money.”\n\nCluster monitoring was brought to “viable” stages in earlier GitLab releases as we transition to\nKubernetes, but the [product team is building out alerting ](https://gitlab.com/gitlab-org/gitlab-ee/issues/5456)\nand other cluster monitoring features in upcoming releases.\n\n## Dogfooding our new incident management system on GitLab\n\nCreating an incident management system is key to a robust observability suite on monitoring:\n“The features we've prioritized are oriented towards getting the right person the right information\nto enable them to restore the services they are responsible for as quickly as possible,” according to\nthe [category vision for an incident management system](/direction/service_management/incident_management/).\n\nBecause we recognize the urgency of building a functional incident management system,\n[GitLab is leveraging issues](/direction/service_management/incident_management/index.html#high-level-design)\nas the base for creating a viable platform. 
The goal is to stress the capacity of our existing\ntooling by focusing on integrations with communications tools such as Slack, Zoom, etc., so we can\naccelerate time-to-market and iterate as we go, while also focusing on building out new functionality.\n\nThe infrastructure team on GitLab.com is [dogfooding the incident management system ](https://gitlab.com/groups/gitlab-org/-/epics/1672)\nso we can put the tooling through its paces, making improvements as we go.\n\n## Outside the loop: Getting GitLab administrators to monitor GitLab using GitLab\n\nKenny says the product team has a strategy for creating more exposure to the monitoring capabilities\nGitLab has in development: putting our monitoring capabilities front and center\nfor administrators of the GitLab self-managed instance.\n\n“Today you can create a project for your application that's an e-commerce app, and get the\ninstrumentation to know whether the Kubernetes cluster is experiencing pain, whether SLOs that\nyou custom define have alerts and respond to that with incidents,” says Kenny. 
“We'd like you to have\nthat exact same experience, or expose you to that same experience with your GitLab self-managed\ninstance, so that as an administrator you're using the same tools to monitor and respond to\nthe GitLab instance as your developers would use to monitor and respond to their applications.”\n\nBy essentially setting up administrators to dogfood the monitoring features we are providing to\ndevelopers for application management, we can ensure that they're battle-tested on a larger application.\n\n## The core challenge of the observability suite\n\nWhile the product team at GitLab has a vision and roadmap for building a comprehensive suite of\nobservability instrumentation, there isn’t a clear consensus among monitoring experts as to what\nis required for a robust observability suite in this new, cloud native world.\n\n“There's varied opinion in the new world that's Kubernetes-based about what an observability\nsystem looks like,” says Kenny. “There's a legacy view that seems to be evolving. So, we need to keep up\nwith that and of the industry's evolution of what we consider required. We as a company just\nneed to stay focused on what our users are asking for, and that's why I think\ncompleting that DevOps loop is important first, because then we'll start getting immediate user feedback.”\n\nKeep an eye out for these new monitoring updates in our 12.2 and 12.3 releases.\n\nCover photo by Glen . 
on [Unsplash](https://unsplash.com/search/photos/binoculars?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText).\n{: .note}\n",[9,754],{"slug":2083,"featured":6,"template":734},"monitoring-team-update","content:en-us:blog:monitoring-team-update.yml","Monitoring Team Update","en-us/blog/monitoring-team-update.yml","en-us/blog/monitoring-team-update",{"_path":2089,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2090,"content":2095,"config":2101,"_id":2103,"_type":14,"title":2104,"_source":16,"_file":2105,"_stem":2106,"_extension":19},"/en-us/blog/moving-to-gcp",{"title":2091,"description":2092,"ogTitle":2091,"ogDescription":2092,"noIndex":6,"ogImage":1220,"ogUrl":2093,"ogSiteName":720,"ogType":721,"canonicalUrls":2093,"schema":2094},"We’re moving from Azure to Google Cloud Platform","GitLab.com is migrating to Google Cloud Platform – here’s what this means for you now and in the future.","https://about.gitlab.com/blog/moving-to-gcp","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"We’re moving from Azure to Google Cloud Platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Andrew Newdigate\"}],\n        \"datePublished\": \"2018-06-25\",\n      }",{"title":2091,"description":2092,"authors":2096,"heroImage":1220,"date":2098,"body":2099,"category":752,"tags":2100},[2097],"Andrew Newdigate","2018-06-25","\nUpdate Jul 19, 2018: The latest info can be found in the [GCP migration update](/blog/gcp-move-update/) blog post.\n{: .alert .alert-info}\n\nImproving the performance and reliability of [GitLab.com](/pricing/)  has been a top priority for us. On this front we've made some incremental gains while we've been planning for a large change with the potential to net significant results: moving from Azure to Google Cloud Platform (GCP).\n\nWe believe [Kubernetes](/solutions/kubernetes/) is the future. 
It's a technology that makes reliability at massive scale possible. This is why earlier this year we shipped native [integration with Google Kubernetes Engine](/blog/gke-gitlab-integration/) (GKE) to give GitLab users a simple way to use Kubernetes. Similarly, we've chosen GCP as our cloud provider because of our desire to run GitLab on Kubernetes. Google invented Kubernetes, and GKE has the most robust and mature Kubernetes support. Migrating to GCP is the next step in our plan to make GitLab.com ready for your mission-critical workloads.\n\nOnce the migration has taken place, we’ll continue to focus on bumping up the stability and scalability of GitLab.com, by moving our worker fleet across to Kubernetes using GKE. This move will leverage our [Cloud Native charts](https://gitlab.com/charts/gitlab), which with [GitLab 11.0](/releases/2018/06/22/gitlab-11-0-released/#cloud-native-gitlab-helm-chart-now-beta) are now in beta.\n\n## How we’re preparing for the migration\n\n### Geo\n\nOne GitLab feature we are utilizing for the GCP migration is our [Geo product](https://docs.gitlab.com/ee/administration/geo/).\nGeo allows for full, read-only mirrors of GitLab instances. Besides browsing the GitLab UI, Geo instances can be used for cloning and fetching projects, allowing geographically distributed teams to collaborate more efficiently.\n\nNot only does that allow for disaster recovery in case of an unplanned outage, Geo can also be used for a planned failover to migrate GitLab instances.\n\n![GitLab Geo - Migration](https://about.gitlab.com/images/gitlab_ee/gitlab_geo_diagram_migrate.png){: .medium.center}\n\nFollowing our mantra of dogfooding everything of our product, we are using Geo to move GitLab.com from Microsoft Azure to Google Cloud Platform. Geo is working well and scales because it's been used by many customers reliably since going GA. 
We believe Geo will perform well during the migration and plan this event as another proof point for its value.\n\nRead more about Disaster Recovery with Geo in our [Documentation](https://docs.gitlab.com/ee/administration/geo/disaster_recovery/).\n\n#### The Geo transfer\n\nFor the past few months, we have maintained a Geo secondary site of GitLab.com, called `gprd.gitlab.com`, running on Google Cloud Platform. This secondary keeps an up-to-date synchronized copy of about 200TB of Git data and 2TB of relational data in PostgreSQL. Originally we also replicated Git LFS, File Uploads and other files, but this has since been migrated to Google Cloud Storage object storage, in a parallel effort.\n\nFor logistical reasons, we selected GCP's `us-east1` site in the US state of South Carolina. Our current Azure datacenter is in US East 2, located in Virginia. This is a round-trip distance of 800km, or 3 light-milliseconds. In reality, this translates into a 30ms ping time between the two sites.\n\nBecause of the huge amount of data we need to synchronize between Azure and GCP, we were initially concerned about this additional latency and the risk it might have on our Geo transfer. However, after our initial testing, we realized that network latency and bandwidth were not bottlenecks in the transfer.\n\n### Object storage\n\nIn parallel to the Geo transfer, we are also migrating all file artifacts, including CI Artifacts, Traces (CI log files), file attachments, LFS objects and other file uploads to [Google Cloud Storage](https://cloud.google.com/storage/) (GCS), Google's managed object storage implementation. This has involved moving about 200TB of data off our Azure-based file servers into GCS.\n\nUntil recently, GitLab.com stored these files on NFS servers, with NFS volumes mounted onto each web and API worker in the fleet. NFS is a single-point-of-failure and can be difficult to scale. 
Switching to GCS allows us to leverage its built-in redundancy and multi-region capabilities. This in turn will help to improve our own availability and remove single-points-of-failure from our stack. The object storage effort is part of our longer-term strategy of lifting GitLab.com infrastructure off NFS. The [Gitaly project](https://gitlab.com/gitlab-org/gitaly), a Git RPC service for GitLab, is part of the same initiative. This effort to migrate GitLab.com off NFS is also a prerequisite for our plans to move GitLab.com over to Kubernetes.\n\n### How we're working to ensure a smooth failover\n\nOnce or twice a week, several teams, including [Geo](https://handbook.gitlab.com/handbook/engineering/infrastructure-platforms/developer-experience/performance-enablement/systems/geo/), [Production](https://handbook.gitlab.com/handbook/engineering/infrastructure/production/), and Quality, get together to jump onto a video call and conduct a rehearsal of the failover in our staging environment.\n\nLike the production event, the rehearsal takes place from Azure across to GCP. We timebox this event, and carefully monitor how long each phase takes, looking to cut time off wherever possible. 
The failover currently takes two hours, including quality assurance of the failover environment.\n\nThis involves four steps:\n\n- A [preflight checklist](https://gitlab.com/gitlab-com/migration/blob/master/.gitlab/issue_templates/preflight_checks.md),\n- The main [failover procedure](https://gitlab.com/gitlab-com/migration/blob/master/.gitlab/issue_templates/failover.md),\n- The [test plan](https://gitlab.com/gitlab-com/migration/blob/master/.gitlab/issue_templates/test_plan.md) to verify that everything is working, and\n- The [failback procedure](https://gitlab.com/gitlab-com/migration/blob/master/.gitlab/issue_templates/failback.md), used to undo the changes so that the staging environment is ready for the next failover rehearsal.\n\nSince these documents are stored as issue templates on GitLab, we can use them to create issues on each successive failover attempt.\n\nAs we run through each rehearsal, new bugs, edge-cases and issues are discovered. We track these issues in the [GitLab Migration tracker](https://gitlab.com/gitlab-com/migration/issues). Any changes to the failover procedure are then made as [merge requests into the issue templates](https://gitlab.com/gitlab-com/migration/merge_requests?scope=all&state=all).\n\nThis process allows us to iterate rapidly on the failover procedure, improving the failover documentation and helping the team build confidence in the procedure.\n\n## When will the migration take place?\n\nOur absolute [top priority](https://gitlab.com/gitlab-com/migration#failover-priorities) for the failover is to ensure that we protect the integrity of our users' data. We will only conduct the failover once we are completely satisfied that all serious issues have been ironed out, that there is no risk of data loss, and that our new environment on Google Cloud Platform is ready for production workloads.\n\nThe failover is currently scheduled for Saturday, July 28, 2018. 
We will follow this post up shortly with further information on the event and will provide plenty of advance notice.\n\nRead the most recent update on [GitLab's journey from Azure to GCP](/blog/gitlab-journey-from-azure-to-gcp/) here!\n",[1147,9,1229,859],{"slug":2102,"featured":6,"template":734},"moving-to-gcp","content:en-us:blog:moving-to-gcp.yml","Moving To Gcp","en-us/blog/moving-to-gcp.yml","en-us/blog/moving-to-gcp",{"_path":2108,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2109,"content":2115,"config":2122,"_id":2124,"_type":14,"title":2125,"_source":16,"_file":2126,"_stem":2127,"_extension":19},"/en-us/blog/multi-cloud-security",{"title":2110,"description":2111,"ogTitle":2110,"ogDescription":2111,"noIndex":6,"ogImage":2112,"ogUrl":2113,"ogSiteName":720,"ogType":721,"canonicalUrls":2113,"schema":2114},"A brief guide to multicloud security","Five challenges and seven best practices to consider for your multicloud strategy.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679136/Blog/Hero%20Images/multi-cloud-security.jpg","https://about.gitlab.com/blog/multi-cloud-security","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A brief guide to multicloud security\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vanessa Wegner\"}],\n        \"datePublished\": \"2019-11-21\",\n      }",{"title":2110,"description":2111,"authors":2116,"heroImage":2112,"date":2118,"body":2119,"category":815,"tags":2120},[2117],"Vanessa Wegner","2019-11-21","\nMany agree that multicloud is worth the risk.\n\nThe multicloud trend has taken hold in recent years, with [RightScale finding\nthat 84% of enterprises run a multicloud strategy](https://www.flexera.com/blog/cloud/2019/02/cloud-computing-trends-2019-state-of-the-cloud-survey/). 
With multicloud,\norganizations deploy applications across two or more cloud platforms, like\nAWS, Azure, or Google Cloud.\n\nIncreased flexibility is one of the biggest appeals of a [multicloud strategy](/topics/multicloud/).\nCompanies avoid vendor lock-in by deploying workloads to different cloud platforms\nbased on cost and application needs. Hyperscale cloud vendors have data centers\nacross the globe, so organizations are able to control their cloud expenditures\nby scheduling workloads based on location and local time. Multicloud also\nprotects business operations by reducing down time, and improving resilience in\nthe event of an outage or workload-disruptive breach (like a DDoS attack).\n\nHowever, multicloud still has drawbacks that require careful consideration.\nThe increased complexity of a multicloud environment exponentially increases\nan organization’s attack surface and level of risk. Most of these risks can be\nmitigated with a thorough assessment and strategy addressing security needs –\nand as [a study from IDG and IBM has found](https://www.ciosummits.com/Online_Assets_IBM_Whitepaper_-_Multi-cloud_Organizations_Confront_IT_Security_Challenges.pdf),\n70% of survey respondents agreed that the benefits of multicloud outweigh the risks.\n\nThat being said, there’s a lot to consider. In this blog, we’ll run through\nsome of the top security challenges of multicloud, and dig into the strategies\nto conquer them. If you're short on time, feel free to skip down to the best\npractices section.\n\n### Key security challenges and how to manage them\n\n#### Access and permissioning\n\nMulticloud adds complexity to your identity and access management efforts.\nEmployees need access to multiple cloud services as part of their daily work,\nand will access your data from a multitude of locations and devices. We\nrecommend you take a Zero Trust approach here: Allow access on an as-needed\nbasis, and no more. 
Data classification levels can help you streamline access\ndeterminations across different clouds, but the key idea is that limited access\nwill both protect your most mission critical and sensitive information, and\nallow you a clear view of when (and by whom) that information is accessed.\n\n#### Staying up to date\n\nWhile this is a security concern for any cloud use, upgrades and patching in\nmulticloud are more challenging because the vulnerabilities and mitigations\nfrom each cloud service provider are different. Multicloud complexity also\nmakes it difficult to keep track of vulnerabilities as applications communicate\nacross multiple clouds. [Mike Bursell from RedHat\ncalls this need “workload freshness”](https://enterprisersproject.com/article/2019/10/multi-cloud-security-issues-watch) – and suggests that this might require you\nto upgrade or patch in place, restart the workload with the latest image, or\ncheck and reload recent dependencies, in order to maintain the most recent\nversions of any dependent libraries, middleware, or executables.\n\n#### A disjointed view of security\n\nMost cloud vendors offer native tools to help you manage security within their\ncloud platform, and most of those tools can’t be applied to other vendors. This\ndisjointed approach to monitoring makes it difficult to gain a thorough\nunderstanding of all the vulnerabilities present in your infrastructure.\n\nInstead of making piecemeal security sense, adopt a multicloud management tool\nthat serves as a single pane of glass into all the happenings across all of your\ncloud platforms. Bursell notes that any monitoring tool needs to be fully aware\nof the scope of your deployment. It’s also important to have regular, if not\nreal-time, updates to your data view so that you’re aware of unusual changes or\nactivities and can address attacks as they come in. 
A centralized tool is also\nvaluable for conducting forensic analysis of your systems in the event of a\nlate-discovered breach.\n\n#### Control plane complexity\n\nRedHat’s Bursell defines the control plane as any communication which controls\nyour applications or how they are run. In addition to securing communications\nbetween and within applications, all scheduling, monitoring, and routing\ncommunications should also be encrypted. It’s critical to secure the\nadministration, logging, and audit functionality of your applications\n(lest you want to give hackers the opportunity to take down your entire\ninfrastructure). [David Locke of World Wide Technology writes\nthat security functionality and enforcement needs to be uniform within all of\nyour cloud environments](https://www.datacenterdynamics.com/opinions/security-challenges-multicloud-evolution/), allowing those functions to communicate and coordinate\nbetween themselves and support security automation.\n\n#### Application hardening\n\nWhen hardening your infrastructure, Bursell recommends knowing what APIs are\nexposed, understanding what controls you have on them, and planning what\nmitigations you can apply if they come under attack. [Tripwire notes that\nany software that your organization develops or acquires](https://www.tripwire.com/state-of-security/security-data-protection/cloud/multi-cloud-security-best-practices-guide/) from a third party must\nbe patched and security hardened by your organization.\n\n### Best practices\n\nNeed a TL;DR? 
We’ve got you covered:\n\n**Key security capabilities and strategies:** Multi-factor authentication,\ncloud workload security, security analytics, encryption, identity and access\nmanagement, cloud security gateways, microsegmentation, threat modeling,\nthreat intelligence, and endpoint detection and response.\n\n**Keep things consistent:** Develop a set of security policies and procedures\nto enforce on all of your clouds (and any on-prem software too, for that matter).\nWhile there will almost always be some kind of incompatibility, a benchmark or\nstandardized security policy will reduce the risk of oversights.\n\n**Cloud agnostic software:** Use security tools that can easily integrate with\nany cloud service, and that can scale with increased apps and workloads.\n\n**Go beyond your CSP’s tools:** Your cloud providers have tools to keep their\nofferings safe, but protection of the data itself falls to you. Some vendors\nmay be able to advise which capabilities you need within their infrastructure\nto keep your data safe.\n\n**Confidential computing:** Data protection usually focuses on data at rest and\nin transit, but what about data in use? Protect data as it is being processed,\nand always know _where_ the data is being used. Confidential computing will\nallow encrypted data to be processed in memory without exposing it to the rest\nof the system. This is a relatively new area, so consider keeping tabs on\nthe [Confidential Computing Consortium](https://confidentialcomputing.io/) to\nstay in the loop.\n\n**Anticipate unforeseen changes:** Planning for the unknown seems like an\noxymoron – but in tech, it’s not. Things change constantly, and often in ways\nwe don’t predict. 
Make sure your systems and environments can adapt to whatever\nthe market throws at you.\n\n**Stay informed of new computing trends:** For instance, [Nick Ismail from\nInformation Age highlights that serverless computing adoption is growing](https://www.information-age.com/business-multicloud-strategy-123471227/) as it allows cloud\ninstances to be scaled and patched instantly, and machine learning will be able\nto help servers identify patterns of malicious behavior and respond faster than\nhuman administrators can respond.\n\n## Looking ahead\n\nJust like every market, cloud will continue to change as vendors make new\nalliances and focus on new capabilities. In 2020, [Forrester predicts](https://go.forrester.com/blogs/predictions-2020-cloud/)\nthat hyperscale global public cloud leaders will form more alliances, while\ncloud management vendors will shift their focus to security – after a\nhigh-visibility data breach. Take steps to ensure that that breach isn’t yours\nby assessing the current and future state of your cloud strategy, and infusing\nsecurity into everything you do.\n\nCover image by [Michael Weidner](https://unsplash.com/@michaelbweidner) on [Unsplash](https://unsplash.com/photos/h-rP5KSC2W0).\n{: .note}\n",[979,2121,9],"zero trust",{"slug":2123,"featured":6,"template":734},"multi-cloud-security","content:en-us:blog:multi-cloud-security.yml","Multi Cloud Security","en-us/blog/multi-cloud-security.yml","en-us/blog/multi-cloud-security",{"_path":2129,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2130,"content":2136,"config":2142,"_id":2144,"_type":14,"title":2145,"_source":16,"_file":2146,"_stem":2147,"_extension":19},"/en-us/blog/next-generation-gitlab-container-registry-goes-ga",{"title":2131,"description":2132,"ogTitle":2131,"ogDescription":2132,"noIndex":6,"ogImage":2133,"ogUrl":2134,"ogSiteName":720,"ogType":721,"canonicalUrls":2134,"schema":2135},"Next-generation GitLab container registry goes GA","Starting in GitLab 17.3, GitLab 
self-managed instances can access the generally available container registry, which features efficient zero-downtime garbage collection and other benefits.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662332/Blog/Hero%20Images/blog-image-template-1800x945__23_.png","https://about.gitlab.com/blog/next-generation-gitlab-container-registry-goes-ga","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Next-generation GitLab container registry goes GA\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tim Rizzi\"}],\n        \"datePublished\": \"2024-07-23\",\n      }",{"title":2131,"description":2132,"authors":2137,"heroImage":2133,"date":2139,"body":2140,"category":795,"tags":2141},[2138],"Tim Rizzi","2024-07-23","Last year, we embarked on an ambitious journey to [re-architect the GitLab container registry](https://gitlab.com/gitlab-org/container-registry/-/issues/199) and unlock powerful new capabilities like zero-downtime garbage collection. 
After successfully migrating GitLab.com to this next-generation registry, we [opened up a beta program](https://about.gitlab.com/blog/gitlabs-next-generation-container-registry-is-now-available/) for self-managed customers to test out the new architecture and provide feedback.\n\nThe results from the beta program have been outstanding – participants are already realizing major benefits, including the following:\n\n- significant storage cost and maintenance time savings from efficient zero-downtime garbage collection, with no required downtime or manual interventions\n- improved performance and reliability for tag cleanup policies and the container registry API and UI\n- early access to new features like better sorting/filtering and storage usage visibility\n\nBased on the positive feedback and successful migrations during the beta, we are excited to announce that the next-generation GitLab container registry will become generally available – but off by default – for self-managed deployments starting with GitLab 17.3.\n\nBelow are the goals and non-goals for reaching this point. The goals are what we need to have in place to officially call this feature GA. 
The non-goals clarify what will not be present or required at the start of GA support for bringing your own database; however, these features may be added later.\n\n__Goals__\n- The import process is free of known bugs.\n- Import documentation reflects known best practices and addresses feedback from the [beta program](https://gitlab.com/gitlab-org/gitlab/-/issues/423459).\n- Registry API, metadata database, and zero-downtime garbage collection are stable and reliable.\n- Able to automatically apply database schema migrations for Charts installs during upgrades.\n- Provide registry database as an opt-in improvement.\n\n__Non-goals__\n- Automatically provision registry database.\n- Automatically apply database schema migrations for omnibus installs during upgrades.\n- Automatically import object storage data.\n- Provide Geo support to ensure your registry is highly available.\n\nFor existing self-managed instances, here's what you can expect:\n\n- In GitLab 17.3, the new registry will be included, but disabled by default to allow time for planning migrations.\n- Enabling the database will be an opt-in process outlined in the [documentation](https://docs.gitlab.com/ee/administration/packages/container_registry_metadata_database.html).\n- The legacy container registry will still receive security updates, but new features and improvements will only be developed for the next-gen version.\n- We will target GitLab 19.0 for the legacy registry to stop being supported after over a year of co-existence.\n- Our goal is to make this transition as seamless as possible while putting customers in control of their migration timeline. The [documentation](https://docs.gitlab.com/ee/administration/packages/container_registry_metadata_database.html) covers all the details on how to plan and execute the move to the next-gen registry.\n\nThis architectural investment lays the foundation for an even more powerful container registry experience in the years ahead. 
Some of the significant improvements on our roadmap include:\n\n- protected repositories and immutable tags\n- improved Helm chart management\n- improved support for signing and attestations\n- many more UX/UI enhancements are only possible with the database architecture\n\nWe couldn't have reached this GA milestone without the valuable feedback from our beta participants. As always, please continue to share your experiences so we can make the GitLab container registry an indispensable part of your DevSecOps toolchain.\n\n> You can try the container registry today with a [free trial of GitLab Ultimate](https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com/blog&glm_content=default-saas-trial).",[542,796,9,795],{"slug":2143,"featured":91,"template":734},"next-generation-gitlab-container-registry-goes-ga","content:en-us:blog:next-generation-gitlab-container-registry-goes-ga.yml","Next Generation Gitlab Container Registry Goes Ga","en-us/blog/next-generation-gitlab-container-registry-goes-ga.yml","en-us/blog/next-generation-gitlab-container-registry-goes-ga",{"_path":2149,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2150,"content":2156,"config":2163,"_id":2165,"_type":14,"title":2166,"_source":16,"_file":2167,"_stem":2168,"_extension":19},"/en-us/blog/observability-is-key-to-cloud-native-transitions-and-modern-application-development",{"title":2151,"description":2152,"ogTitle":2151,"ogDescription":2152,"noIndex":6,"ogImage":2153,"ogUrl":2154,"ogSiteName":720,"ogType":721,"canonicalUrls":2154,"schema":2155},"Observability's role in cloud-native app development","Want better visibility into the entire software development lifecycle across environments? 
Learn how observability can help.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663993/Blog/Hero%20Images/2018-developer-report-cover.jpg","https://about.gitlab.com/blog/observability-is-key-to-cloud-native-transitions-and-modern-application-development","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Observability is key to cloud-native transitions and modern application development\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sandra Gittlen\"}],\n        \"datePublished\": \"2022-04-05\",\n      }",{"title":2157,"description":2152,"authors":2158,"heroImage":2153,"date":2160,"body":2161,"category":815,"tags":2162},"Observability is key to cloud-native transitions and modern application development",[2159],"Sandra Gittlen","2022-04-05","\n\n_This blog post and linked pages contain information related to upcoming products, features, and functionality. It is important to note that the information presented is for informational purposes only. Please do not rely on this information for purchasing or planning purposes._\n\nModern application development requires DevOps teams to be able to collaborate and react to what is happening across the software development lifecycle. Yet, as companies move away from monolithic code bases resident on a server or cluster of virtual machines to cloud-native environments, this goal becomes more difficult to achieve. Cloud-native architectures are more complex with more elements to configure, protect, execute, and measure. 
To ensure maximum visibility and responsiveness to issues early on in application development and throughout the lifecycle, companies are adopting observability.\n\n## Observability defined\n\nObservability, which [451 Research](https://451research.com/) defines as the collection and analysis of data logs, metrics, and traces, becomes critical and essential with cloud-native technologies and acts as a step beyond monitoring. “The need for such an approach has been brought to the fore by complex, distributed microservices-based applications where the variables are so numerous that it can be impossible to know exactly what metrics need to be collected for the gamut of potential events that could arise,” 451 Research’s “Voice of the Enterprise: DevOps, Organizational Dynamics - Advisory Report” states.\n\n“A need to know what is happening with infrastructure and applications, particularly across hybrid and multi-cloud infrastructure, has driven broad adoption of observability,” according to the report.\n\n## How observability improves cloud-native tech adoption\n\nMore than half of organizations surveyed by 451 Research report either full adoption or some adoption at the team level of cloud-native technologies such as containers, Kubernetes, service mesh, and serverless computing. Another quarter to one-third of respondents plans to deploy cloud-native technologies.\n\nThe challenge is visibility across this new, more complex architecture. While cloud-native technologies offer more flexibility and cost efficiencies for computing resources, they can make it difficult to gain end-to-end visibility of software vulnerabilities, application performance, and quality assessments, and to be able to know where and how to affect change early on in the development lifecycle.\n\nDevOps improvements such as security and analytics are driving the adoption of observability, as is the increased need for compliance. 
With observability, according to 451 Research’s report, “one can query the data they have and ask any number of questions about a system, and, ideally, get an answer without having to predefine the exact data collected or tagging applied to answer the question.”\n\nIn other words, observability can provide a more flexible toolkit and enable a more active drill-down into what’s actually happening in the development lifecycle. With properly implemented observability, DevOps teams can, in real-time, identify a problem, fix it, benchmark the improvement, and measure it going forward – even in a cloud-native environment that is abstracted from knowledge of underlying systems. Having the ability to observe and measure your end-to-end DevOps efforts can reduce risk and provide greater control of cloud-native environments. \n\nDigital transformation leaders and laggards alike understand the need for observability. Nearly two-thirds of all respondents say they have adopted observability (41%) or have it in discovery/proof of concept (23%). Nearly a third plan to implement it within 12 to 24 months.\n\n“While it is great to see these adoption rates, the ultimate goal is to evolve observability’s inputs into actionable insights that positively impact the business,” says Sebastien Pahl, principal product manager at GitLab and co-founder of observability start-up OpsTrace (which was [acquired by GitLab in 2021](/press/releases/2021-12-14-gitlab-acquires-opstrace-to-expand-its-devops-platform-with-open-source-observability-solution.html)).\n\n## The benefits of observability\n\nIn modern application development, dev, sec, and ops teams share the responsibility of software development and delivery. In mature organizations, DevOps can extend to include stakeholders from compliance, legal, finance, and other departments with a direct stake in value delivery. 
Observability provides DevOps teams greater flexibility in how to utilize and share data across an organization.\n\nPahl likens observability to a flight crew being able to see, learn from, and react to all the data from instruments and dashboards on a plane as it is flying. “With observability, everyone can look at the same data through a different lens,” he says.\n\nObservability has significant benefits, including the following:\n\n- Developers can add code early in the development lifecycle for events they want to observe.\n\n- DevOps teams can move faster because they know when something is wrong and exactly what is wrong. They can fix problems once and move on.\n\n- Organizations can detect problems before customers do.\n\n- DevOps teams can assign certain alerts to specific individuals or teams so ops teams won’t be burned out responding to general alerts.\n\n- The inputs and metrics written through observability lay the foundation for AI and machine learning.\n\n## Observability and the DevOps Platform\n\nGitLab believes that [observability is foundational](https://opstrace.com/blog/gitlabobsvervabilityui) to a DevOps platform, and will make the capability available to all GitLab users. [Our vision](/direction/monitor/) is to make every GitLab project observable by default, with features that are easy to operate without specialized, expert skills. Teams can connect the dots between every deployment, incident, and other noteworthy events using and collaborating with telemetry data, which ultimately decreases the frequency and severity of production issues.\n\nGitLab’s observability capability is completely open-sourced and relies on open APIs such as Prometheus and OpenTelemetry so users don’t have to worry about vendor lock-in from instrumentation to alerting. 
It’s built into the GitLab DevOps platform to help you use the capability right away within your native workflow.\n\nLearn more about [observability and the DevOps Platform](https://about.gitlab.com/).\n\n\n\n\n\n\n\n",[563,9,731,796],{"slug":2164,"featured":6,"template":734},"observability-is-key-to-cloud-native-transitions-and-modern-application-development","content:en-us:blog:observability-is-key-to-cloud-native-transitions-and-modern-application-development.yml","Observability Is Key To Cloud Native Transitions And Modern Application Development","en-us/blog/observability-is-key-to-cloud-native-transitions-and-modern-application-development.yml","en-us/blog/observability-is-key-to-cloud-native-transitions-and-modern-application-development",{"_path":2170,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2171,"content":2177,"config":2183,"_id":2185,"_type":14,"title":2186,"_source":16,"_file":2187,"_stem":2188,"_extension":19},"/en-us/blog/pick-your-brain-interview-brandon-foo",{"title":2172,"description":2173,"ogTitle":2172,"ogDescription":2173,"noIndex":6,"ogImage":2174,"ogUrl":2175,"ogSiteName":720,"ogType":721,"canonicalUrls":2175,"schema":2176},"Pick Your Brain interview with CEO Sid Sijbrandij","Brandon Foo, co-founder and CEO of Polymail (YC S16), recently sat down with GitLab CEO Sid Sijbrandij.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680453/Blog/Hero%20Images/pick-your-brain-interview.jpg","https://about.gitlab.com/blog/pick-your-brain-interview-brandon-foo","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Pick Your Brain interview with CEO Sid Sijbrandij\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brandon Foo\"}],\n        \"datePublished\": \"2017-06-02\",\n      }",{"title":2172,"description":2173,"authors":2178,"heroImage":2174,"date":2180,"body":2181,"category":815,"tags":2182},[2179],"Brandon Foo","2017-06-02","\n\nI sat 
down for a “[pick your brain](https://handbook.gitlab.com/handbook/eba/ceo-scheduling/#pick-your-brain-meetings)” meeting with GitLab’s CEO and Co-founder, [Sid Sijbrandij](/company/team/#sytses), to learn about his approach towards different aspects of building a successful startup. Here are some highlights of the conversation.\n\n\u003C!-- more -->\n\n**Brandon: When you were an earlier company around your seed stage, what were your most effective growth strategies?**\n\n**Sid:** GitLab got started as a [Show HN of GitLab.com](https://news.ycombinator.com/item?id=4428278). We’ve always tried to see where our users were and talk with them there.\n\nWhen you find people who have a need for your product, you start by trying to bring it to their attention. Then you enter a phase where they care about your product, and they start asking you for more — that’s easy, that’s the honeymoon phase. Now we’re getting to the phase where people think of GitLab as a given, and that it should be perfect, so they tell you the things that could be better.\n\n**Brandon: How do you think about product strategy with respect to building new features versus improving or increasing adoption of existing features?**\n\n**Sid:** It’s kind of a pendulum that swings back and forth. We focused a lot on new features for a while to accomplish our [idea to production vision](https://www.youtube.com/watch?v=PoBaY_rqeKA), and now this quarter [we’re focusing](/direction/) on increasing adoption of existing features. Mostly this is necessary for newer features, but that’s not the same as increasing the features’ scope, it’s more a question of how we can increase adoption for the features we already have, and seeing which functions are missing. When we release features and have the suspicion few people are using them, we evaluate to make sure those features are things that people can really use. 
Most recently in [9.2](/releases/2017/05/22/gitlab-9-2-released/) we added the framework to translate GitLab into any language, and allowed users to specify multiple assignees to better track shared ownership of an issue. [In 2017](/direction/#2017-goals), we’ll continue to ship features tailored for enterprise development teams, and make it easier to build, deploy, and monitor applications within GitLab.\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/WBf_DA0FF9k\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n**Brandon: How do you balance building visionary features that people aren’t necessarily asking for vs. building in direct response to customer requests?**\n\n**Sid:** We do both. We started off doing just version control and code review, and now GitLab delivers the entire DevOps pipeline, everything from chatting about an idea and planning it, to getting it out in production and monitoring. We envision enabling everyone to collaborate on digital content, so they can work together and achieve better results. No one asked for that — it’s something we did, it’s the future of the company now. We’d have been in a bad spot if we hadn’t done that.\n\nAt the same time, don’t lose track of what your customers are asking for. Balancing that is the hard part. The natural result is too little visionary stuff; if you build the right company, then everyone will be listening to your customers and screaming, “Let’s build the things customers want!” So the leadership’s task is focusing on what we need to do in order to be a better company in five years.\n\n**Brandon: Since you bootstrapped for some time, how did you decide when it was the right time to raise institutional funding?**\n\n**Sid:** One big reason is the talent we wanted to attract. While we were in YC, we tried to hire a good sales leader, but everyone we approached wanted stock in the company. 
We hadn’t raised any outside money so stock was all mine and my co-founder [Dmitriy’s](/company/team/#dzaporozhets) — he started GitLab and I started GitLab.com.\n\nThis made clear that if we were unable to give out stock, we were not going to hire the best people; if we’re not getting the best people, we’re going to lose in the marketplace. If you give people stock while not taking outside money, you’ll still grow but very slowly, which is not the kind of deal these executives were expecting. They expect that after 6-7 years the stock is worth something and they can get liquid. The only way to get there is to attract external capital.\n\n**Brandon: Is there anything that you would change in retrospect that you think might improve the outcome of where GitLab is today?**\n\n**Sid:** In hindsight, I’d rather have started GitLab.com a bit later. We’ve grown so fast since then that we’ve been behind in making a great experience for our users.\n\nI would focus on people running GitLab self-managed, and start GitLab.com when we were ready for it. I’d rather have people not use our product than using the product and not being absolutely happy about it. 
It’s not about users, it’s about happy users.\n\nIf not 100% of the users are happy, we’re not doing a good enough job.\n\n## About the Guest Author\n\nBrandon Foo is the Co-founder and CEO of [Polymail](https://polymail.io/), an email productivity platform designed for modern teams and companies.\n",[9,755,1475],{"slug":2184,"featured":6,"template":734},"pick-your-brain-interview-brandon-foo","content:en-us:blog:pick-your-brain-interview-brandon-foo.yml","Pick Your Brain Interview Brandon Foo","en-us/blog/pick-your-brain-interview-brandon-foo.yml","en-us/blog/pick-your-brain-interview-brandon-foo",{"_path":2190,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2191,"content":2197,"config":2205,"_id":2207,"_type":14,"title":2208,"_source":16,"_file":2209,"_stem":2210,"_extension":19},"/en-us/blog/pick-your-brain-interview-cedric-savarese",{"title":2192,"description":2193,"ogTitle":2192,"ogDescription":2193,"noIndex":6,"ogImage":2194,"ogUrl":2195,"ogSiteName":720,"ogType":721,"canonicalUrls":2195,"schema":2196},"Pick Your Brain interview: FormAssembly CEO Cedric Savarese","GitLab CEO Sid Sijbrandij and FormAssembly CEO Cedric Savarese met online to talk remote culture, hiring and scaling.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680396/Blog/Hero%20Images/pick-your-brain-with-cedric-savarese.jpg","https://about.gitlab.com/blog/pick-your-brain-interview-cedric-savarese","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Pick Your Brain interview: FormAssembly CEO Cedric Savarese\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ashley McAlpin\"}],\n        \"datePublished\": \"2017-08-11\",\n      }",{"title":2192,"description":2193,"authors":2198,"heroImage":2194,"date":2200,"body":2201,"category":2202,"tags":2203},[2199],"Ashley McAlpin","2017-08-11","\n_GitLab CEO Sid Sijbrandij occasionally sits down for a \"[pick your 
brain](https://handbook.gitlab.com/handbook/eba/ceo-scheduling/#pick-your-brain-meetings)\"\nmeeting with people seeking advice on open source, remote work, or discussion of other things related to GitLab._\n\nNavigating growth in a high-tech remote organization can be challenging. CEO of [FormAssembly](https://www.formassembly.com/), Cedric Savarese, recently sat down with GitLab CEO Sytse (Sid) Sijbrandij to chat about remote culture, hiring and scaling.\n\n\u003C!-- more -->\n\nRemote culture can be difficult to navigate. At FormAssembly, we approach our [team](https://www.formassembly.com/team/) and the way we work by carefully considering job functions and responsibilities, geographical location and team needs overall. During their chat, Cedric and Sid discussed addressing third-party perceptions as a growing remote team — here are some of the highlights.\n\n## Can remote sales teams really grow successfully?\n\n**Cedric:** I’ve heard it especially for sales teams, where it seems to be that salespeople kind of benefit from being in the sort of boiler room environment where they kind of feed on each other’s energy, to successfully grow remotely. You guys do enterprise sales, so is that something that you’ve found to be true? Or are you considering having some teams maybe more concentrated?\n\n**Sid:** Yep, certainly we were under the same assumptions, so we were completely remote at Y Combinator and everyone’s like “Yeah, it works for developers, they’re used to it, they like it, but it doesn’t work for anything else.” So we’re like, ok, we’re not going to be, one of our values is boring solutions, we’re not going to like try to be innovative here. We got an office, I’m sitting in it now, and we got nine desks or something here, and we’re like “We’ll grow  out of this, but it’s a good start. We’ll hire the salespeople and they’ll be working the phones and they’ll be high-fiving one another. 
So salespeople came in and after a few days, they kind of stopped coming in. And I was like, ok, well, what I’m gonna tell them? You have to be here man. No, that’s like, sort of, like in our handbook. We value results, I don’t particularly care how you get it done, just get it done. So I wanted to stay true to that and actually now that I’ve talked to more and more people, most people say “Oh, enterprise sales team, they’re kind of remote anyway because as soon as you grow, you split it up by geography, especially enterprise sales, so people are spread across the country anyway.”\n\n**Cedric:** Yeah, that’s true that the larger the organization gets, the more they have to spread out, even if you’re not truly remote, you’re going to be in bigger buildings, you’re going to be in different buildings and then you’re going to be in satellite offices and so on, so in the end, you’re doing the same as a remote team.\n\n## How do you address communication and collaboration as a remote team?\n\n**Sid:** So what we do is we create lots of like artifacts. We extensively use issues in Google docs and we tend to write things down. In an on-premises company, you can get away with doing a lot of things verbally, with us, it’s very often written down so there is an issue to refer to, there is a doc to refer to, and like making your own notes and not putting them in the doc is kind of like a cardinal sin. That’s not ok, we should all be on the same page. And we recognize that takes effort but you’re not going to, that’s what makes us work together efficiently. I think that if you do it right it’s easy to have a high-growth company that’s remote but that, well, maybe not necessarily remote, but that has a really good handbook. We went from nine people to 150 people in two years, that is normally your culture dilutes a lot because everyone kind of verbally – you get the telephone game where it gets worse over time. The message gets more garbled every time it’s transferred. 
Guess what, we don’t use telephone, we use a handbook, and the message doesn’t get more garbled, the message just gets better because continually we’re updating that so we don't end up with a diluted culture, we end up with an enhanced cultures and customs and practices.\n\n**Cedric:** Do you spend any time in person with your new hires or do you just start fully remote?\n\n**Sid:** No, we don’t spend any time in person, either during the hiring process or after they start.\n\n**Cedric:** Do you think there’s value in spending some time with someone in person before kind of letting them loose on a remote project?\n\n**Sid:** Yeah, I guess there’s value. When people are close to each other, we sometimes recommend, like, hey this person’s close to you, consider, especially your first month, spending a day together every week or something. But not that often, and it’s not necessary, I think. But, there is value in having a buddy, so we assign a buddy. There is value in meeting people in the company, so we require everyone to have 10 virtual coffee breaks, and try to distill, the trick is distill what was the in-person thing good for? Well to have a very low-friction way of asking people a potentially stupid question. Well, assign them a buddy so they can ask those questions. Unpack what the interaction is and organize it.\n\n## Does your leadership team get together outside of the team reunions?\n\n**Sid:** We don’t. We do have executive get-togethers. We call them remote off-site where we spend two mornings, working to our quarterly plan, but that’s remote. I think one of the challenges is that you need, as an executive team, you need some talking time together. So we plan that, but because I’m not able to go, when I have something, I tend to not quickly go to someone, but I tend to write it in our agenda. So when we talk we have a big agenda to get through. 
And the kind of, the risk of that is not talking about the important things, but only about the small things, so that’s something I’m still trying to improve.\n\nHave questions? Tweet us at [@Formassembly](https://twitter.com/FormAssembly?ref_src=twsrc%5Egoogle%7Ctwcamp%5Eserp%7Ctwgr%5Eauthor) and [@GitLab](https://twitter.com/gitlab).\n","culture",[9,2204],"remote work",{"slug":2206,"featured":6,"template":734},"pick-your-brain-interview-cedric-savarese","content:en-us:blog:pick-your-brain-interview-cedric-savarese.yml","Pick Your Brain Interview Cedric Savarese","en-us/blog/pick-your-brain-interview-cedric-savarese.yml","en-us/blog/pick-your-brain-interview-cedric-savarese",{"_path":2212,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2213,"content":2219,"config":2225,"_id":2227,"_type":14,"title":2228,"_source":16,"_file":2229,"_stem":2230,"_extension":19},"/en-us/blog/pick-your-brain-interview-jake-stein",{"title":2214,"description":2215,"ogTitle":2214,"ogDescription":2215,"noIndex":6,"ogImage":2216,"ogUrl":2217,"ogSiteName":720,"ogType":721,"canonicalUrls":2217,"schema":2218},"Open source lessons learned: My interview with GitLab’s CEO","Stitch CEO and co-founder Jake Stein sits down for a pick your brain meeting with GitLab CEO Sid Sijbrandij.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680388/Blog/Hero%20Images/pyb-jake-stein.jpg","https://about.gitlab.com/blog/pick-your-brain-interview-jake-stein","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Open source lessons learned: My interview with GitLab’s CEO\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jake Stein\"}],\n        \"datePublished\": \"2017-08-18\",\n      }",{"title":2214,"description":2215,"authors":2220,"heroImage":2216,"date":2222,"body":2223,"category":729,"tags":2224},[2221],"Jake Stein","2017-08-18","\n_GitLab CEO Sid Sijbrandij occasionally sits down for a \"[pick your 
brain](https://handbook.gitlab.com/handbook/eba/ceo-scheduling/#pick-your-brain-meetings)\"\nmeeting with people seeking advice on open source, remote work, or discussion of other things related to GitLab._\n\nWhen we launched Singer, our [open source ETL project](https://www.singer.io/) at [Stitch](https://www.stitchdata.com/), I was looking for advice on the best strategies to make it successful. August Capital is an investor in both Stitch and GitLab, and they were kind enough to introduce me to Sid Sijbrandij, CEO of GitLab. Sid was very generous with his time, and he shared some of his lessons learned about open source.\n\n\u003C!-- more -->\n\n## GitLab’s unique approach\n\nAs I explained Stitch to Sid, he asked a few follow up questions, and then shared information about a plan to build up the GitLab analytics stack. I didn’t set up the call intending to sell, but before it was over, he had added us to the publicly accessible page listing the tools that their team plans to evaluate. Their transparency is very impressive, and it eliminates the friction that can slow down a traditional company.\n\n## Open source adoption\n\nVirtually all of GitLab’s paying customers have come from their open source user base. While GitLab has a large sales team, they are primarily focused on converting users to the paid products rather than getting new GitLab users.\n\nOver 100,000 organizations use GitLab, and their product and engineering teams are responsible for growing that number. One of most important drivers of that growth has been improving the first run experience and time to value.\n\nWe already had plans to improve the Singer user experience, but Sid encouraged me to take it a step further. The most common use case for Singer, and ETL in general, is pulling data into a database and then visualizing the data. 
He recommended that we bundle Singer with a PostgreSQL database and an open source visualization tool like Metabase into a easy-to-use package, potentially in a Docker container, which will allow users to get to their end goal much faster.\n\nThis was a really interesting idea that had not occurred to our team before. It motivated us to start thinking more holistically about the goals of our open source users, and I’m confident that this will help us grow adoption of Singer.\n\n## Open source business model\n\nGitLab started as a free, open source tool and later introduced an enterprise edition and the free SaaS version of GitLab.com. Several years later, in April of 2017, they introduced paid tiers on GitLab.com.\n\nWe’ve taken a very different path with Stitch. We launched with a freemium SaaS service, and subsequently added an enterprise edition of the SaaS product and the free, open source Singer project.\n\nI thought that the differences in GitLab’s path might have been due to a philosophical decision about business model sequence, but it was much more practical. GitLab started as an open source project, and a business was created around it only after the project had significant traction. In the early days of the business, on-premises was where all of the usage was, so that’s where they started to charge. The original SaaS product was free so it could get traction and build a network effect. As the SaaS product got better, and as the cost of hosting the ever-growing number of SaaS users increased, they launched paid tiers.\n\nWhile Stitch and GitLab had very different beginnings, our business models have evolved in a similar direction. It was great to get the benefit of the lessons that Sid has learned as we chart our own course.\n\n## About the Guest Author\n\nJake Stein is the co-founder and CEO of Stitch. Prior to Stitch, Stein was co-founder and COO at RJMetrics, a business intelligence software company that was acquired by Magento in 2016. 
Before founding RJMetrics, Jake worked at Insight Venture Partners, a software-focused venture capital and private equity firm. He graduated from the Wharton School at the University of Pennsylvania with high honors and concentrations in Finance and Entrepreneurship.\n",[9,731],{"slug":2226,"featured":6,"template":734},"pick-your-brain-interview-jake-stein","content:en-us:blog:pick-your-brain-interview-jake-stein.yml","Pick Your Brain Interview Jake Stein","en-us/blog/pick-your-brain-interview-jake-stein.yml","en-us/blog/pick-your-brain-interview-jake-stein",{"_path":2232,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2233,"content":2239,"config":2246,"_id":2248,"_type":14,"title":2249,"_source":16,"_file":2250,"_stem":2251,"_extension":19},"/en-us/blog/pick-your-brain-interview-kwan-lee",{"title":2234,"description":2235,"ogTitle":2234,"ogDescription":2235,"noIndex":6,"ogImage":2236,"ogUrl":2237,"ogSiteName":720,"ogType":721,"canonicalUrls":2237,"schema":2238},"GitLab CEO interview: Building the best distributed Dev team","FineTune CTO Kwan Lee sits down for a 'pick your brain' meeting with GitLab CEO Sid Sijbrandij.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680355/Blog/Hero%20Images/pyb-kwan-lee.jpg","https://about.gitlab.com/blog/pick-your-brain-interview-kwan-lee","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to become the best distributed software development team? My interview with GitLab's CEO\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Kwan Lee\"}],\n        \"datePublished\": \"2017-09-15\",\n      }",{"title":2240,"description":2235,"authors":2241,"heroImage":2236,"date":2243,"body":2244,"category":2202,"tags":2245},"How to become the best distributed software development team? 
My interview with GitLab's CEO",[2242],"Kwan Lee","2017-09-15","\n_GitLab CEO Sid Sijbrandij occasionally sits down for a \"[pick your brain](https://handbook.gitlab.com/handbook/eba/ceo-scheduling/#pick-your-brain-meetings)\"\nmeeting with people seeking advice on open source, remote work, or discussion of other things related to GitLab._\n\nIt was great to find the time for us to pick Sid’s brain and learn from the history and the organizational challenges that GitLab had overcome so that we may reference them for building a better organization. There were some cultural elements, tactical organizational elements and software development process-related elements that were valuable pointers.\n\n\u003C!-- more -->\n\n## Lessons in remote work\n\nHaving 178 people in 38 countries was quite an impressive distribution of employees across different geographies. Sponsoring travel to work with fellow members of the company was a great program that they have to bridge the distributed nature of the company. We are also a very distributed company and we want to grow a company where a distributed team can scale and collaborate actively while continuously increasing the motivation to build higher quality software at higher velocity for our customers.\n\nOne of the challenges of being remote is that, although we are part of one company, it is tricky for us to interact in a casual manner as we do in a physical co-working environment. GitLab promotes virtual coffee breaks and all-team meetings to promote these. People can arrange coffee breaks with others at their will to catch up. During all-team meetings, they go around introducing personal updates about themselves. The team being remote requires everybody to still feel part of one company. 
In order to feel part of the company, it requires participation from everybody and their willingness to share their personal lives.\n\n>In order to feel part of the company, it requires participation from everybody and their willingness to share their personal lives.\n\nAt [FineTune](https://www.finetunelearning.com/), we are not used to taking coffee breaks during the day with coworkers, but we try to have regular meetings where we try to catch up personally for the first five minutes. Our weekly company meetings have been a little bit informal and did not give opportunity for each of the members to speak. We plan to keep encouraging people to share more as we want to grow a culture of sharing more as we grow and scale.\n\nSid also described various rooms for social interaction in the company. Some more interesting venues for social interaction that were suggested by Sid were:\n\n* Team call four times a week (20 minutes)\n* Summit: every nine months they fly everybody to one place, for interactions that are less organized with scheduled activities and forbidden to have team meetings ('unconference')\n* Asynchronous discussions via merge requests, while they sometimes get on video call to summarize what has been concluded or decided\n\nWriting things down is important due to the remote nature of the company. We have been pretty bad at keeping consistent standards on documentation and keeping them up to date. It also hinders communication flow, making it difficult to discover and share knowledge when we do not have such consistency. We are working on ways to improve this nowadays.\n\n>Writing things down is important due to the remote nature of the company\n\n## Lessons in organization\n\nWhen it comes to organization and growth, what we got most out of it was that we need to find the gaps in our team and try to fill in those parts we lack when we hire new members. 
Currently, we have a gap in frontend tech lead, and by thinking through what gaps exist in our development and future of our company we found that we would like to find a tech lead who has extensive experience modularizing frontend software components and has worked with complex microservice APIs that would facilitate the flow of communication between frontend and backend members.\n\nSome other organizational lessons learned from growing from 15 to 50 was that:\n* Sid was the only Sales (non-development) team member\n* Get things done on time and having well defined tasks are very important\n* One boss to give approvals\n* No project managers\n\nWe want to organize ourselves so that we are making decisions quickly and moving fast. I believe that as long as the priority framework for decision-making is clear, everybody should feel free to make the decisions that move the company forward.\n\n## Lessons in the development process\n\nIn terms of development process, we realized we needed to shorten the time to release and try to keep shipping. Importance of fast iteration was emphasized by Sid and shipping fast by cutting scope.  We should not fall into the trap of building the car and not shipping when the bicycle is ready.  We also need more discipline to maintain good, coherent design documentations that allows us to be all on the same page.\n\nIt was interesting to see the scale of work that the distributed team worked on. When a new sprint starts, product and UX team already had designed and product team had schedules for release. 
Ad hoc dev teams get formed for big features (every release has around five big features and 100s of small issues), making a chat channel, discussing issue descriptions, figuring out when you hand off from backend to the frontend team.\n\n>\"always finish the flow first\"\n\nGitLab's approach of \"always finish the flow first\" (breadth vs depth) to take care of coordination resonated strongly since that involves more people and requires people to be on the same page to further dive in deeper. Also, the \"building better a experience\" and \"releasing a more integrated experience\" brings a lot of emergent benefits.\n\nSome mistakes seen were people having hard time iterating and sometimes over engineering implementation which risks release deadline. Everyone can contribute, but at the end person doing the work makes the decision.\n\n## Lessons in leadership\n\nAs a final question, we asked what prevents a software company from growing.  The answer we got was the lack of ambition. We as the founders or development team may not be as ambitious as we should be. Our company has been in the ed tech industry for 10 years and had not seen much growth. What we realized was that our goals and bars were set too low. We have a lot of strong design and engineering capability that we have built up over the last six months and now it is our time to think and act with more ambitious goals. There is a lot of value in helping people to write better and measure the quality of written content since most communication nowadays is done via writing.\n\nWe want to become an important company that helps with not only K-12, higher-ed education in EdTech industry, but also with professional development and employability across any industry that requires written communication to succeed. 
We have a long way to go, but the invaluable discussion we had with Sid informed us some of the good practices to follow and a trajectory to aim for around our next growth path.\n\n[Cover image](https://unsplash.com/@blakeconnally?photo=B3l0g6HLxr8) by [Blake Connally](https://unsplash.com/@blakeconnally) on Unsplash\n{: .note}\n",[9,2204,1792],{"slug":2247,"featured":6,"template":734},"pick-your-brain-interview-kwan-lee","content:en-us:blog:pick-your-brain-interview-kwan-lee.yml","Pick Your Brain Interview Kwan Lee","en-us/blog/pick-your-brain-interview-kwan-lee.yml","en-us/blog/pick-your-brain-interview-kwan-lee",{"_path":2253,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2254,"content":2260,"config":2267,"_id":2269,"_type":14,"title":2270,"_source":16,"_file":2271,"_stem":2272,"_extension":19},"/en-us/blog/pick-your-brain-interview-vincent-jong",{"title":2255,"description":2256,"ogTitle":2255,"ogDescription":2256,"noIndex":6,"ogImage":2257,"ogUrl":2258,"ogSiteName":720,"ogType":721,"canonicalUrls":2258,"schema":2259},"Key decisions for building successful startups","Vincent Jong of SaaS.CEO sits down for a 'pick your brain' meeting with GitLab CEO Sid Sijbrandij.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680253/Blog/Hero%20Images/pick-your-brain-interview-thrive.jpg","https://about.gitlab.com/blog/pick-your-brain-interview-vincent-jong","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Saas.CEO and Sid Sijbrandij talk key decisions, influential connections, and strategic vision when building a startup\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vincent Jong\"}],\n        \"datePublished\": \"2018-01-26\",\n      }",{"title":2261,"description":2256,"authors":2262,"heroImage":2257,"date":2264,"body":2265,"category":2202,"tags":2266},"Saas.CEO and Sid Sijbrandij talk key decisions, influential connections, and strategic vision when building 
a startup",[2263],"Vincent Jong","2018-01-26","\n_GitLab CEO Sid Sijbrandij occasionally sits down for a \"[pick your brain](https://handbook.gitlab.com/handbook/eba/ceo-scheduling/#pick-your-brain-meetings)\"\nmeeting with people seeking advice on open source, remote work, or discussion of other things related to GitLab._\n\n\u003C!-- more -->\n\n**GitLab has become a leading provider in software development solutions, but it didn’t start out like that. Looking back, what were the one or two decisions that really made the company to the success it is today?**\n\nThe first one is the decision to build a company around it, because GitLab started as an open source project without a company. As such a project gets bigger, you will have to pay people to keep the quality high.\n\nAnother thing was my co-founder Dmitriy tweeting \"I want to work on GitLab full time,\" which led me to contact him and hire him, which was a great change.\n\nThis may be atypical advice on a SaaS CEO interview series, but one thing we did right was not to focus on SaaS. The demand for GitLab was coming from the self-managed side much more than from the SaaS side, so we decided to focus on that first.\n\nThe final one was the decision to apply to Y Combinator. This changed our ambition level from just running the project to being a market leader.\n\n**Would you say that your focus on the self-managed product also allowed you to focus on a different market segment than where players like GitHub were already capturing market share?**\n\nWhen we started, GitHub and Atlassian were already there in that market and it should have been locked up. But they left an opening in the self-managed market and at the bottom of the market.\n\nIn the beginning our software wasn’t very good, but we were able to rapidly make it better and grow upmarket. 
This is a great thing because I think today most of the revenue is coming from those large accounts.\n\nThe way I see it, source code management is one of the last things to leave self hosted for SaaS. Where this happened much earlier for CRM for example, I think source code for various reasons is transitioning later. We still see that for companies with more than 5,000 employees, 95 percent is still self hosted.\n\n**Alright. Looking at a more general perspective, what would you say you understand about building a (SaaS) company that is often overlooked or underestimated by other founders?**\n\nWhat we do differently is that we write things down. We’re a remote-only company of 200 people working from 200 locations. We try to work as asynchronously as we can and we write down what we do. The output of that is a [company handbook](https://handbook.gitlab.com/handbook/) with over 500 pages of our processes.\n\nFor a fast growing company, it is important that new people know the customs and values of the organization. Spending a lot of time to verbally communicate this is time consuming and dilutive, because you are never going to be able to tell person 100 as well as you’ve told the first. However, when you write it down, which is very painful in itself, person 100 will have an even more detailed version than person 1. So it gets better over time.\n\n**Then let’s talk about the people you’ve worked with. For startups, connecting with the right people can be a game-changer. One person can provide a connection that changes everything. If you look at people who are not employed at GitLab – which person provided essential additional value and how did you get in touch with this person?**\n\nJoining Y Combinator has been essential for us. It opened up lots of doors that would otherwise have been closed. For example, the seed round of investors we have with people like Ashton Kutcher and Michael Arrington. 
I don’t think they would have even looked at us if it wasn’t for Y Combinator.\n\nThen your board members are just very important. We got lucky with our first board member, Bruce Armstrong, operating partner at Khosla Ventures, who was very thoughtful with us and very hardworking in helping us every step along the way. That felt very empowering and it’s not always the case with venture capitalists, so that was awesome.\n\nSometimes it’s just reaching out. Like Matt Mullenweg who joined our board. He is the CEO of Automattic, the makers of WordPress. I just sent him an email saying “Hey, can we talk?” If you show you’ve done your homework, like mentioning why you want to talk and reference a blog post or something they tweeted, people are more likely to respond.\n\n**One of the things we do at [SaaS.CEO](https://www.saas.ceo/) is ask our audience beforehand if they have any questions for the CEO who is being interviewed. This time two questions came up. The first is coming from Michael Kamleiter, CEO of Swat.io and Walls.io. He asks \"How do you go about positioning towards other players like GitHub, especially when you were still a smaller company?\"**\n\nI don’t think we’ve figured it out yet. Where our competition was sometimes more focused on the needs of open source projects, we focused on those large customers and their requirements. For example, our competition has two levels of authorization and we have five, because our customers need more granularity.\n\nPositioning to me is mostly marketing and I think we have lagged in that regard. Actually, the last two days I have been in a workshop to figure out our positioning. What we’re going to do is articulate that GitLab is an end-to-end tool. Where all the other applications are about assembling a toolchain and orchestrating that toolchain, we want to be \"toolless.\"\n\nIf you have a toolchain, you end up having all these handoffs that create delays from working in serial. 
We want people with GitLab to be able to work in parallel. I think that will be a big enabler of our future growth. But it’s a really hard thing to determine, to get everybody aligned on, and then to roll it out on all your channels, from product to sales to marketing.\n\n**The second question we received is from Florian Dorfbauer, CEO of Usersnap. His question is: \"With the latest investment round, you've also revealed the bigger vision of GitLab: providing a complete DevOps experience. How much time do you spend on strategic vision building and what does the process look like to work on such strategies?\"**\n\nI consider myself a Product CEO and spend most of my time on our product. The way I spend time on this is first of all by talking with customers. My call before this was with a potential customer, to answer their questions. It’s great to be able to talk directly with customers.\n\nI also keep an eye on our issue tracker and [Hacker News](https://news.ycombinator.com/), which are important channels for me. Apart from that I work a lot with our product managers where we try to get the best out of each other.\n\nIt’s all driven by what you know about where the market is – what are the trends, what are the analysts saying, what are customers saying, what are users saying. All these things come together and you reflect on it with each other and choose a direction.\n\n**By sharing your experiences, you have given valuable input to other SaaS CEOs out there. Therefore, I want to give you the opportunity to ask something in return. Is there something our listeners can do for you?**\n\nI think it would be great that those who read this reach out to you to be interviewed so you will have more content and we can make this a bigger thing. 
Then when this becomes a famous podcast I can claim to be the first one ever to be interviewed.\n\nSecondly, I would like to take the opportunity to say that GitLab.com is becoming a great product now, so I hope that in 2018 people will give it a shot and try it out.\n\n**Sid, thank so much for sharing your insights. I’m very happy to have had you as our first interviewed CEO and we do hope many of the readers and listeners will follow your request.**\n\n### About the guest author\n\nVincent is a Dutch serial entrepreneur excited about advanced technology and Software as a Service solutions. While building his company, he noticed how many founders are trying to get in touch with the same people: CEOs who have already walked the path they are going. Facing the same challenge, he founded [SaaS.CEO](https://www.saas.ceo/), a platform to make successful SaaS founders more accessible. His own company [Thrive for Email](http://www.thrive.email/) is an AI-driven sales automation solution that helps sales reps increase their capacity by automatically entering all data into the CRM.\n\n[Cover image](https://unsplash.com/photos/kRnkqSKZODQ) by [Federico Beccari](https://unsplash.com/@federize) on Unsplash\n{: .note}\n",[9,755],{"slug":2268,"featured":6,"template":734},"pick-your-brain-interview-vincent-jong","content:en-us:blog:pick-your-brain-interview-vincent-jong.yml","Pick Your Brain Interview Vincent Jong","en-us/blog/pick-your-brain-interview-vincent-jong.yml","en-us/blog/pick-your-brain-interview-vincent-jong",{"_path":2274,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2275,"content":2281,"config":2288,"_id":2290,"_type":14,"title":2291,"_source":16,"_file":2292,"_stem":2293,"_extension":19},"/en-us/blog/pipelines-as-code",{"title":2276,"description":2277,"ogTitle":2276,"ogDescription":2277,"noIndex":6,"ogImage":2278,"ogUrl":2279,"ogSiteName":720,"ogType":721,"canonicalUrls":2279,"schema":2280},"Pipelines-as-Code: How to improve speed from idea to 
production","Pipelines-as-Code streamline automatic building, testing, and deploying of applications using prebuilt pipelines and infrastructure components. Here's how it works.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663397/Blog/Hero%20Images/logoforblogpost.jpg","https://about.gitlab.com/blog/pipelines-as-code","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Pipelines-as-Code: How to improve speed from idea to production\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Robert Williams\"}],\n        \"datePublished\": \"2022-01-18\",\n      }",{"title":2276,"description":2277,"authors":2282,"heroImage":2278,"date":2284,"body":2285,"category":752,"tags":2286},[2283],"Robert Williams","2022-01-18","\nToday’s DevOps platform-centric world is moving steadily towards an \"Everything-as-Code\" mentality. Add in cloud native, and it's clearly even more important to standardize how you define your DevOps processes.\n\n## Why ‘as-Code’?\n\nThanks to faster iteration, cloud native computing, and [microservices-based architectures]\n(https://about.gitlab.com/topics/microservices/), as-Code technologies have become the de-facto standard for a lot of different parts of the software development lifecycle. \n\nThe need to release faster requires a single spot for teams to collaborate on any kind of change – code, infrastructure, configuration, networking, or testing. And to implement that change quickly we need to be able to see and review it before it goes into production. \n\nAs-Code solutions are at the core of cloud native technologies such as Kubernetes, where you utilize YAML or JSON formats to configure and manage. 
Here are the key advantages of 'as-Code':\n\n- auditability\n- scalability\n- efficiency\n- collaboration\n\nThese benefits come into play with every piece of technology that moves into as-Code; we have seen it time and again as DevOps processes mature and we automate each piece of the software development lifecycle. Here are the critical 'as-Code' stages: \n\n### Build-as-Code\n\nOne of the first steps when building a new pipeline is to implement a way to build your application automatically. Containerization is one of the most common ways: You define your build steps as a Dockerfile and then you have automated the build of the application.\n\n### Test-as-Code\n\nAs our deployment frequency and team size scales, the need for test cases to be automated scales as well. So we automate, we write unit tests and test scripts to execute unit tests, and then we ensure the changes can be continuously integrated safely, without introducing unplanned bugs.\n\n### Security-as-Code\n\nTo ensure software gets to market quickly, security must be included in your testing process. The testing has to happen either through tools integrated with each individual project, or implemented as code, creating job templates for security scanners that can be ingested by projects as required. These steps enable teams to quickly become compliant with various security frameworks (like PCI-DSS, HIPAA, or ISO) as they become relevant for the project.\n\n### Deployment-as-Code\n\nDeployments need to be standardized so they are predictable every time. To ensure successful peer review, production and development environment deployments need to be the same, and there's an added bonus of a quality gate between them. 
Through scripting and implementation of Deployment-as-Code, we end up with the ability to continuously deploy code and continuously deliver value.\n\n## Why Pipelines-as-Code?\n\nPipelines are the center of the CI/CD workflow – they're the automation heart that powers all of the benefits of as-Code technologies. Once you have the Build-as-Code, Test-as-Code, Deployment-as-Code, Infrastructure-as-Code, and Configuration-as-Code, you have all the parts needed to ensure that you can reliably and predictably take your application into production environments. But, to move changes in with agility, you need to take all those parts and string them together into a pipeline.\n\nThe technology behind Pipelines-as-Code makes it possible to create centralized repositories for your organization's pipelines. Pipelines-as-Code can be set up to fit all boxes for varied languages and use cases (like [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/)) or with a [number of options](https://gitlab.com/gitlab-org/gitlab/-/tree/master/lib/gitlab/ci/templates) so that developers can pick base pipelines to fit their use case. It's important to have a baseline that conforms to the organization's standards because that always increases the speed to production.\n\nThe entire team can collaborate on changes to each part of the workflow. Version history can be easily maintained in the same version control system as everything else that touches the DevOps lifecycle.\n\nThe benefits of as-Code technology reach a pinnacle with Pipelines-as-Code, so teams gain increases in efficiency, scalability, auditability, and collaboration. 
Pipelines-as-Code are at the center of automated GitOps, DevOps, and SecOps workflows.\n",[2287,9,563],"AI/ML",{"slug":2289,"featured":6,"template":734},"pipelines-as-code","content:en-us:blog:pipelines-as-code.yml","Pipelines As Code","en-us/blog/pipelines-as-code.yml","en-us/blog/pipelines-as-code",{"_path":2295,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2296,"content":2302,"config":2308,"_id":2310,"_type":14,"title":2311,"_source":16,"_file":2312,"_stem":2313,"_extension":19},"/en-us/blog/production-grade-infra-devsecops-with-five-minute-production",{"title":2297,"description":2298,"ogTitle":2297,"ogDescription":2298,"noIndex":6,"ogImage":2299,"ogUrl":2300,"ogSiteName":720,"ogType":721,"canonicalUrls":2300,"schema":2301},"GitOps & DevSecOps for production infrastructure in minutes","Unlock production-grade infrastructure and development workflows in under five minutes with Five Minute Production App: a blend of solutions offered by AWS, Hashicorp Terraform, and GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665839/Blog/Hero%20Images/devops.png","https://about.gitlab.com/blog/production-grade-infra-devsecops-with-five-minute-production","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Production-grade infrastructure, GitOps convergence, and DevSecOps in under 5 minutes\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sri Rangan\"}],\n        \"datePublished\": \"2021-02-24\",\n      }",{"title":2303,"description":2298,"authors":2304,"heroImage":2299,"date":2305,"body":2306,"category":752,"tags":2307},"Production-grade infrastructure, GitOps convergence, and DevSecOps in under 5 minutes",[726],"2021-02-24","This blog post was originally published on the GitLab Unfiltered\nblog. 
It was reviewed and republished on\n2021-03-10.\n\n{: .note .alert-info .text-center}\n\n\nThis is a story about achieving production-grade infrastructure in under\nfive minutes.\\\\\n\nThis is a story about achieving production-grade DevSecOps in under five\nminutes.\\\\\n\nThis is a story about achieving total convergence of GitOps in under five\nminutes.\n\n\nMy name is Sri and over the last three months I worked closely with\nGitLab co-founder [DZ](/company/team/#dzaporozhets) in building \"Five Minute\nProduction App.\"\n\n\nThe app blends solutions offered by AWS, Hashicorp Terraform, and GitLab,\nand offers production-grade infrastructure and development workflows in\nunder five minutes.\n\n\n![Five Minute Production App\nDiagram](https://about.gitlab.com/images/blogimages/five-min-prod-01-complete-flow.png){:\n.shadow.medium.center}\n\n\nApart from the efficiencies gained from using Five Minute Production App,\nyou benefit by achieving stateful, production-ready infrastructure on the\nAWS hypercloud.\n\n\nWe started with AWS first, as it is the hypercloud leader today. Support for\nAzure and Google Cloud is on the roadmap.\n\n\nOur vision and design decisions are explained in the\n[README](https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template#quickly).\n\n\n## Quickstart \n\n\nWe start with your GitLab project which has the source code of your web\napplication. 
Regardless of which language or framework you use, your web\napplication is packaged as a container image and stored within your GitLab\nproject's Container Registry.\n\nThis is the Build stage.\n\n\nThis is followed by the Provision stage where Terraform scripts connect to\nAWS and create a secure environment for your web application.\n\nThe environments provisioned relate to your Git branching workflow.\n\nLong-lived Git branches create long-lived environments, and short-lived Git\nbranches correspond to short-lived environments.\n\n\nResources provisioned include an Ubuntu VM, scalable PostgreSQL database, a\nRedis cluster, and S3 object storage.\n\nWe consider these elements as the building blocks for majority of web\napplications, and many of these fall under AWS free tier.\n\n\nThe infra state and credentials are stored within your GitLab project's\nmanaged Terraform state.\n\n\nFinally, we reach the Deploy stage which:\n\n1. Retrieves the deployable image from the GitLab Container Registry\n\n1. Retrieves the infrastructure credentials from the Gitlab Managed\nTerraform State, and\n\n1. Proceeds to deploy your web application\n\n\nEverything is achieved by including these two lines in your `.gitlab-ci.yml`\nfile.\n\n\n```yaml\n\ninclude:\n  remote: https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template/-/raw/stable/deploy.yml\n```\n\n\nLet's look at the complete process in more detail.\n\n\n![Three stages of Five Minute Production\nApp](https://about.gitlab.com/images/blogimages/five-min-prod-02-pipeline.png){:\n.shadow.medium.center}\n\nThe three stages of Five Minute Production App\n\n{: .note.text-center}\n\n\n## Build and package\n\n\nThe Build stage is where it all begins. Five Minute Production App reuses\nthe [Auto Build\nstage](https://docs.gitlab.com/ee/topics/autodevops/stages.html#auto-build)\nfrom the GitLab Auto DevOps pipeline.\n\n\nAuto Build builds and packages web applications that are:\n\n1. 
Containerized with a Dockerfile, or\n\n2. Compatible with the Cloud Native buildpack, or\n\n3. Compatible with the Heroku buildpack\n\n\nThus, web applications across multitudes of technologies are supported,\nincluding web frameworks such as Rails, Django, Express, Next.js, Spring,\netc.\n\nand programming languages including Python, Java, Node.js, Ruby, Clojure,\netc.\n\n\nOnce the Auto Build job has finished execution, the newly created container\nimage is stored as an artifact in your GitLab project's Container Registry.\n\n\n## Provision the infrastructure\n\n\nThe next step, Provision, prepares infrastructure resources in AWS.\n\nThe first requirement here is the presence of AWS credentials stored as\nCI/CD variables at the project or group level.\n\nOnce valid AWS credentials are found, a Terraform script is executed to\ngenerate resources in AWS.\n\n\nThese resources include:\n\n1. EC2 VM based on Ubuntu 20.04 LTS\n\n2. PostgreSQL database managed by AWS RDS\n\n3. Redis cluster managed by AWS ElastiCache\n\n4. S3 bucket for file storage\n\n5. Email Service credentials managed by AWS SES\n\n\nThe most critical resource is the PostgreSQL service which has daily backups\nenabled.\n\nPostgreSQL data is snapshotted if the infrastructure resource is \"destroyed\"\nthrough a manual user action via the Five Minute Production App pipeline.\n\n\nThe EC2 VM is the only service accessible publicly. 
Ports 22, 80 and 443 are\nexposed.\n\nEvery other resource described above is part of a secure, private network,\nhidden from the public web, accessible only via the EC2 instance and your web\napplication deployed there.\n\n\nThe stateful services and environments are tied to your Git branches.\\\\\n\nThis means every Git branch creates a new environment with these resource\nsets.\\\\\n\nWe don't have a preference on your Git branching and environments\nlifecycle.\\\\\n\nUse long-lived or short-lived branches as you see fit, just keep in mind\nthat long-lived branches lead to long-lived environments and short-lived\nbranches lead to short-lived environments.\n\n\n![Infrastructure resources provisioned on\nAWS](https://about.gitlab.com/images/blogimages/five-min-prod-03-infra-resources.png){:\n.shadow.medium.center}\n\nInfrastructure resources provisioned on AWS\n\n{: .note.text-center}\n\n\n## Deploy your web application\n\n\nFinally comes the Deploy stage.\n\n\nThis is where the deploy script retrieves your web application package\n(container image) from the GitLab Container Registry, then retrieves the EC2\ninstance\n\ncredentials from the GitLab Managed Terraform State, and proceeds to deploy\nthe relevant version of your web application in its environment.\n\n\nModern web applications might require additional commands being executed\nafter each deployment or after the initial deployment,\n\nand these commands can be defined as variables in your `.gitlab-ci.yml`\nfile.\n\n\nFinally, with the help of Certbot from Letsencrypt, SSL certificates are\ngenerated and configured for your web application.\n\nIf you have defined the `CERT_DOMAIN` CI/CD variable the SSL certificate\nwill be generated for your custom domain name.\n\nOtherwise the generated SSL certificate uses a dynamic URL that Five Minute\nProduction App prepares for you.\n\n\n## Conclusion\n\n\nThere we have it. A simple yet production-ready setup for your web\napplication. 
If you are looking for an AWS-based setup, this is ready for\nusage.\n\n\nIf you are looking for something similar but not quite Five Minute\nProduction App, this serves as an example of how to converge\ninfrastructure-as-code with software development and provide seamless\ncontinuous deployment workflows.\n\n\nIn my personal experience, this is one of the most complete examples of\nGitOps:\n\n\n1. Your application source code lives in your GitLab project\n\n2. Your infrastructure defined as code lives in your GitLab project\n\n3. Your CI/CD pipeline lives in your GitLab project\n\n4. Your infrastructure state lives in your GitLab project\n\n5. Your infrastructure secrets and credentials live in your GitLab project\n\n6. Your environments configuration lives in your GitLab project\n\n\nThis complete GitOps convergence is not specifically configured for one\nproject. It can be included as a template from multiple projects.\n\nThere is no reason why the GitLab project in your organization cannot be the\nsingle source of truth for everything.\n\n\n### Links\n\n\n- [Five Minute Production\nApp](https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template/-/blob/master/README.md)\n\n- [Reference\nExamples](https://gitlab.com/gitlab-org/5-minute-production-app/examples)\n\n\n### About the author\n\n\n[Sri Rangan](mailto:sri@gitlab.com), an Enterprise Solutions Architect with\nGitLab, is a core-contributor and maintainer\n\nof [Five Minute Production\nApp](https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template/-/blob/master/README.md).\n",[1021,1062,563,9,558,1103],{"slug":2309,"featured":6,"template":734},"production-grade-infra-devsecops-with-five-minute-production","content:en-us:blog:production-grade-infra-devsecops-with-five-minute-production.yml","Production Grade Infra Devsecops With Five Minute 
Production","en-us/blog/production-grade-infra-devsecops-with-five-minute-production.yml","en-us/blog/production-grade-infra-devsecops-with-five-minute-production",{"_path":2315,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2316,"content":2322,"config":2329,"_id":2331,"_type":14,"title":2332,"_source":16,"_file":2333,"_stem":2334,"_extension":19},"/en-us/blog/pull-based-kubernetes-deployments-coming-to-gitlab-free-tier",{"title":2317,"description":2318,"ogTitle":2317,"ogDescription":2318,"noIndex":6,"ogImage":2319,"ogUrl":2320,"ogSiteName":720,"ogType":721,"canonicalUrls":2320,"schema":2321},"Pull-based GitOps moving to GitLab Free tier","Learn how this change provides organizations increased flexibility, security, scalability, and automation in cloud-native environments.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670178/Blog/Hero%20Images/GitLab-Ops.png","https://about.gitlab.com/blog/pull-based-kubernetes-deployments-coming-to-gitlab-free-tier","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Pull-based GitOps moving to GitLab Free tier\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sandra Gittlen\"},{\"@type\":\"Person\",\"name\":\"Lauren Minning\"}],\n        \"datePublished\": \"2022-05-18\",\n      }",{"title":2317,"description":2318,"authors":2323,"heroImage":2319,"date":2325,"body":2326,"category":1250,"tags":2327},[2159,2324],"Lauren Minning","2022-05-18","\n\nGitLab will include support for pull-based deployment in the platform’s Free tier in an upcoming release, which will provide users increased flexibility, security, scalability, and automation in cloud-native environments. With pull-based deployment, DevOps teams can use the [GitLab agent for Kubernetes](/blog/introducing-the-gitlab-kubernetes-agent/) to automatically identify and enact application changes. 
\n\n“DevOps teams at all levels benefit from utilizing GitOps strategies such as pull-based deployment in their cloud-native environments. By offering this feature in GitLab’s Free tier, we can introduce more organizations to the power and utility of this secure and scalable functionality,” says [Viktor Nagy](https://gitlab.com/nagyv-gitlab), product manager of GitLab’s Configure Group.\n\nAs an open-core company, GitLab is happy to contribute to the GitOps community and enable the adoption of best practices in the industry.\n\n## What is pull-based deployment?\n\nPull-based and push-based deployment are [two main approaches to GitOps](/topics/gitops/), an operational framework that takes DevOps best practices used for application development such as version control, collaboration, compliance, and [CI/CD](/topics/ci-cd/) tooling, and applies them to infrastructure automation. \n\nGitOps enables operations teams to [move as quickly as their application development counterparts](/blog/gitops-done-3-ways/) by making use of automation and scalability, without sacrificing security. \n\nWhile push-based, or agentless, deployment relies on a CI/CD tool to push changes to the infrastructure environment, pull-based deployment uses an agent installed in a cluster to pull changes whenever there is a deviation from the desired configuration. In the pull-based approach, deployment targets are limited to Kubernetes and an agent must be installed in each Kubernetes cluster.\n\n“As long as the GitLab agent for Kubernetes on your infrastructure has the necessary access rights in your cluster, you can configure everything automatically, reducing the DevOps workload and the opportunity to introduce errors,” Nagy says.\n\n## Pull-based deployment vs. push-based deployment\n\nPush-based deployment and pull-based deployment each have their pros and cons. 
Here is a list of the advantages and disadvantages of each GitOps practice:\n\nPush-based deployment pros:\n- ease of use\n- well-known as part of CI/CD\n- more flexible, as deployment targets can be on physical servers or virtual containers, not restricted to Kubernetes clusters \n\nPush-based deployment cons:\n- requires organizations to open their firewall to a cluster and grant admin access to external CI/CD\n- requires organizations to adjust their CI/CD pipelines when they introduce new environments\n\nPull-based deployment pros:\n- secure infrastructure - no need to open your firewall or grant admin access externally\n- changes can be automatically detected and applied without human intervention\n- easier scaling of identical clusters\n\nPull-based deployment cons:\n- agent needs to be installed in every cluster\n- limited to Kubernetes only\n\n## How pull-based deployment impacts the Free-tier experience\n\nIncluding support for pull-based deployments in GitLab’s Free tier provides a tremendous competitive advantage for smaller organizations as they can now apply automation in a safe and scalable manner to their cloud-native infrastructure, including virtual containers and clusters. And, for organizations that are trying to get started quickly by minimizing the number of tools in their infrastructure ecosystem, this functionality is included in One DevOps Platform, not as a point solution. \n\n“DevOps teams don’t have to continuously write code for new infrastructure elements – they can write the code once, within a single DevOps platform, and have the agent automatically find it, pull it, and apply it, as well as configuration changes,” Nagy says. 
“Also, with the availability of pull-based deployment in this introductory tier, newcomers to GitLab will immediately be able to modernize application development and reduce the security risk associated with configuring such infrastructure.”\n\n_This blog post contains information related to upcoming products, features, and functionality. It is important to note that the information presented is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog post and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab Inc._\n\n\n\n\n\n\n",[2328,859,979,9,558],"DevOps platform",{"slug":2330,"featured":6,"template":734},"pull-based-kubernetes-deployments-coming-to-gitlab-free-tier","content:en-us:blog:pull-based-kubernetes-deployments-coming-to-gitlab-free-tier.yml","Pull Based Kubernetes Deployments Coming To Gitlab Free Tier","en-us/blog/pull-based-kubernetes-deployments-coming-to-gitlab-free-tier.yml","en-us/blog/pull-based-kubernetes-deployments-coming-to-gitlab-free-tier",{"_path":2336,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2337,"content":2343,"config":2348,"_id":2350,"_type":14,"title":2351,"_source":16,"_file":2352,"_stem":2353,"_extension":19},"/en-us/blog/pyb-all-remote-mark-frein",{"title":2338,"description":2339,"ogTitle":2338,"ogDescription":2339,"noIndex":6,"ogImage":2340,"ogUrl":2341,"ogSiteName":720,"ogType":721,"canonicalUrls":2341,"schema":2342},"How being all-remote helps us practice our values at GitLab","GitLab CEO Sid Sijbrandij and Mark Frein of InVision talk about why all-remote is the future, and moving beyond 'But how do you know they're working?'","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680686/Blog/Hero%20Images/webcast-cover.png","https://about.gitlab.com/blog/pyb-all-remote-mark-frein","\n     
                   {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How being all-remote helps us practice our values at GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sara Kassabian\"}],\n        \"datePublished\": \"2019-07-31\",\n      }",{"title":2338,"description":2339,"authors":2344,"heroImage":2340,"date":2345,"body":2346,"category":2202,"tags":2347},[855],"2019-07-31","\n\nAll-remote workplaces like GitLab and InVision are disrupting the status quo by abandoning the office and creating a new model for the ideal workplace, and employees and employers are starting to catch on. GitLab CEO [Sid Sijbrandij](/company/team/#sytses) and [Mark Frein](https://www.linkedin.com/in/mark-frein-886148/), chief people officer at product design platform [InVision](https://www.invisionapp.com/), recently met to chat about the future of remote work, leadership in a distributed company, and the values that drive their work (and why [all-remote](https://handbook.gitlab.com/handbook/company/culture/all-remote/) isn’t one of them).\n\n## Build interpersonal relationships, digitally\n\nOn your first day at GitLab or InVision, you don’t walk up to the office, put on a smile, and find your desk. Instead, you sit on your desk chair, deck, or couch, open your laptop and connect using a suite of different technologies that provide a portal into your home.\n\n“I often say, ‘How often do you invite people into your home on day one when you're starting a new job?’” says Mark. “We are already inside your most personal space. 
We can see your bookcase, we can see things that are important to you, we can see your cat jumping on your lap, because animals always want to make sure they’re with you on important calls.”\n\nWhen a company empowers a distributed team to embrace the inevitable interruptions of doorbells ringing, phones buzzing, and demands from pets, children, and partners, you get to know your remote teammates better than if you shared an office. People are free to share more of themselves than if they were commuting from their homes to a common area.\n\nBy sharing your home, albeit digitally, with your colleagues, it is critical that your teammates show the same degree of humility and empathy for colleagues as they do for customers.\n\nAll-remote companies that are making hiring decisions ought to search for workers that are highly skilled in their areas of expertise, as well as in interpersonal communication. It is the active listeners, clear communicators, and willing collaborators that drive progress in all-remote companies, because these interpersonal skills allow teams to breach the digital divide and make lasting contributions to the company and product.\n\nLeadership in all-remote organizations must be similarly intentional. Managers do not have the benefit of serendipity at all-remote companies; instead, they must work harder to emotionally engage with the people they lead.\n\n## Technology is driving the all-remote movement\n\nThere are three primary communication channels that connect GitLab team members and InVision team members. “I think of our right and left hands as Zoom and Slack,” says Mark. 
At GitLab, we primarily use our own product, as well as Zoom and Slack to connect our distributed team.\n\nThe advent of these powerful communication tools is what helps all-remote companies like GitLab and InVision exist, and is a driving factor behind the movement for workplaces to go all-remote.\n\n“I think we're just at the beginning of this movement, and a lot of what's worked has been hacked together so far,” says Mark. “I think remote is going to last as long as the history of work, and it’s just in its infancy.”\n\nThinking back to 10 or 15 years ago, communication technologies first started being used in new and unique ways to mediate relationships. Mark points to the early days of online multiplayer game, World of Warcraft, as an example of serious all-remote gaming that helped condition us to using communication technology in collaborative ways. Just like WoW unlocked online massive multiplayer gaming, tools like Zoom unlock the potential of the all-remote workplace.\n\n## But wait, how do you know if they’re working?\n\nThere are many people from outside the all-remote world that remain incredulous about the idea of a distributed team. Both Sid and Mark are often asked the same questions about all-remote: \"How do you know that people are working?\"\n\n“I view these as old workplace, old economy questions,” says Mark. 
“Those are usually the least interesting questions.”\n\nThe framework that “work” is a lot of people in the building at the same time minimizes the focus on each individual contributor’s work product.\n\n“In many co-located companies, you can just show up and people will presume you’re working, but at GitLab we actually check your output and results,” says Sid.\n\nThere are also many people at co-located companies who will claim they value hiring the best people for the job, or that people are the heart of their organization, a statement largely incongruous with their practices, notes Sid.\n\n“You're saying people are the most important, but you limit your hiring to 1% of the world population? Then the people who are most important, you make them commute two hours of every day?” says Sid.\n\n## The drawbacks of part-remote\n\nIn response to the demand for greater flexibility in scheduling and workplaces, there are more co-located companies that are trying out remote teams or allowing a few remote work days a week or month. While this is generally a move in the right direction for greater employee autonomy, Mark and Sid have some skepticism about the effectiveness of this approach, because in each case there remains a single center of power.\n\n“I am still very much a skeptic around an organization that culturally is anchored in physicality bolting on remote capability,” says Mark. 
“I have not seen that work, which doesn't mean that it hasn't and I obviously haven't seen every organization out there, but in those cases there are still real stretches of culture and behaviors when it comes to the haves and have-nots and the people who are in the center.”\n\nThere is intentionally no headquarters for GitLab or InVision, because by creating a physical room where it happens, there are certain advantages for the team members in the room, and disadvantages for those that are not.\n\nHistorically, GitLab’s company robot named Beamy, lived in the San Francisco boardroom, which is in Sid’s home in the city. Beamy was created as an exercise in [transparency](https://handbook.gitlab.com/handbook/values/#transparency), so every GitLab team member can see for themselves that there is no secret headquarters where decisions are made. “I’m just working from home like everyone else,” says Sid.\n\n## All-remote isn’t a value\n\nThe fixtures of GitLab’s company culture are our [values](https://handbook.gitlab.com/handbook/values/): collaboration, results, efficiency, Diversity, Inclusion & Belonging , and transparency. Everything in the company flows from those values, and while being all-remote is a distinguishing feature to our company, like InVision, we don’t really consider it to be a core value.\n\nDuring this part of the discussion, Sid, who is one of the rare people who can stay fully engaged in a conversation while also multitasking, added a section to our handbook, “[What is not a value](https://handbook.gitlab.com/handbook/values/#what-is-not-a-value),” which reads:\n\n“All remote isn't a value. 
It is something we do because it helps us practice our values of transparency, efficiency, Diversity, Inclusion & Belonging , and results.”\n\nWatch the full conversation between Sid and Mark on GitLab Unfiltered.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/IFBj9KQSQXA\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n",[9,1475,2204],{"slug":2349,"featured":6,"template":734},"pyb-all-remote-mark-frein","content:en-us:blog:pyb-all-remote-mark-frein.yml","Pyb All Remote Mark Frein","en-us/blog/pyb-all-remote-mark-frein.yml","en-us/blog/pyb-all-remote-mark-frein",{"_path":2355,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2356,"content":2362,"config":2367,"_id":2369,"_type":14,"title":2370,"_source":16,"_file":2371,"_stem":2372,"_extension":19},"/en-us/blog/reduce-it-costs",{"title":2357,"description":2358,"ogTitle":2357,"ogDescription":2358,"noIndex":6,"ogImage":2359,"ogUrl":2360,"ogSiteName":720,"ogType":721,"canonicalUrls":2360,"schema":2361},"How to reduce IT costs","Four ways organizations can spend less on IT and more on innovation.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680558/Blog/Hero%20Images/reduce-it-costs.jpg","https://about.gitlab.com/blog/reduce-it-costs","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to reduce IT costs\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2019-04-11\",\n      }",{"title":2357,"description":2358,"authors":2363,"heroImage":2359,"date":2364,"body":2365,"category":815,"tags":2366},[772],"2019-04-11","\n\nEfficient organizations do _more_ with _less_ – it's just simple math, really.\nBut even as teams try to stay lean and agile, some IT budgets are anything but. 
In a [recent survey that analyzed IT spending](https://searchcio.techtarget.com/magazineContent/How-Company-Size-Relates-to-IT-Spending)\nbased on company size, small companies spend on average 6.9 percent of their revenue on IT\n(enterprise spending is usually around 3 percent). Out of this IT spending, [more than 70 percent goes toward maintenance](https://phoenixnap.com/blog/it-cost-reduction-strategy) – just keeping things running.\n\nIT cost reduction could help fund the innovations all companies need to stay competitive,\nbut therein lies the problem. How do you prioritize what stays and what goes when _everything_ feels important?\nReducing IT costs doesn't happen in a vacuum – teams across the organization depend on these decisions.\n\n## Where do I start?\n\n### Reduce on-premise IT\n\n[On-premise IT has several costs](https://ianmartin.com/10-strategies-top-cios-use-reduce-costs/):\nthe servers themselves, power and cooling, staff to maintain them, software licenses, and the\nadditional leased space needed to house it all. [Virtualization hosts multiple virtual instances](https://www.bmc.com/blogs/6-ways-reduce-ongoing-maintenance-management-costs/)\n(Virtual Machines, VMs) of an operating environment on the same machine, reducing the\nnumber of physical servers needed. Virtual environments offer more flexibility, containers\nthat run independently, and fewer costs over the long term.\nTaking on cloud-based architecture embodies doing more with less.\n\n### Evaluate toolchain-management costs\n\nThose that spend more on their IT needs aren't typically the top performers – they just have more stuff.\nEvery application and plugin creates another potential point of failure, and added complexity\nalmost always spoils the efficiency party. Those in charge of IT have to keep up with more\nmaintenance, more patches, more logins, which in turn leads to more IT staff and even more\ncomplexity. 
Look at your toolchain – plugins, applications, and licenses – and evaluate the costs.\nSeveral \"inexpensive\" licenses that look harmless in micro add up quickly in macro.\nThis doesn't even factor in the ongoing costs (upgrades, management, additional staff).\n\nSo how much are you paying for your toolchain? We created this handy calculator that shows the\nannual cost for 100 users using some of the most common DevOps tools.\n\n[How much is your toolchain?](/calculator/roi/)\n{: .alert .alert-gitlab-purple .text-center}\n\n### Follow best practices to reduce downtime\n\nDowntime is every team's tech nightmare. It's estimated that [the cost of downtime for an\naverage-sized company is over $7,000 per minute](https://www.datafoundry.com/blog/6-cost-reduction-strategies-enterprise-IT)\n(yikes), and it can have far-reaching implications: worse customer relationships, employee turnover,\nand it can scare off investors, just to name a few.\nWhen facing a budget crunch, it might seem minor to skimp on a few steps when you're confident\nof the outcomes, but doing it right the first time saves much more in the long run.\nFollowing best practices like user testing and code reviews takes time up front, but they lower the chance of costly mistakes.\n\n### Modernize applications and migrate to lower-cost infrastructures\n\nConsolidating tools and an aggressive approach to application modernization are going to\nbe the greatest opportunities for enterprises to save budget dollars.\nA recent survey of top enterprise architects found that [36 percent cited application rationalization as the primary initiative they're working on](https://www.itbusinessedge.com/slideshows/show.aspx?c=93453).\nAnother survey of 250 senior IT executives found that 58 percent of them said\n[the best way to cut IT costs was to modernize or migrate existing applications to a lower-cost IT 
infrastructure](https://www.itbusinessedge.com/cm/blogs/vizard/application-modernization-tops-it-agenda/?cs=41480).\n\nReducing IT costs is essential for scale and funding innovations that keep organizations competitive,\nbut making the right cuts requires prioritization. For savings in the long term keep these four objectives in mind:\n\n*   Reduce on-premise IT.\n*   Evaluate toolchain-management costs.\n*   Follow best practices to reduce downtime.\n*   Modernize applications and migrate to lower-cost infrastructures.\n\nIT cost reduction directly correlates to increased revenue, but it isn't always easy. It just requires a little commitment.\n\n[Just commit.](/blog/application-modernization-best-practices/)\n{: .alert .alert-gitlab-purple .text-center}\n\nCover image by [Thomas Jensen](https://unsplash.com/photos/qTEj-KMMq_Q?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/computer-servers?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[9,563],{"slug":2368,"featured":6,"template":734},"reduce-it-costs","content:en-us:blog:reduce-it-costs.yml","Reduce It Costs","en-us/blog/reduce-it-costs.yml","en-us/blog/reduce-it-costs",{"_path":2374,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2375,"content":2381,"config":2387,"_id":2389,"_type":14,"title":2390,"_source":16,"_file":2391,"_stem":2392,"_extension":19},"/en-us/blog/remote-development-beta",{"title":2376,"description":2377,"ogTitle":2376,"ogDescription":2377,"noIndex":6,"ogImage":2378,"ogUrl":2379,"ogSiteName":720,"ogType":721,"canonicalUrls":2379,"schema":2380},"Behind the scenes of the Remote Development Beta release","Discover the epic journey of GitLab's Remote Development team as they navigate last-minute pivots, adapt, and deliver new features for users 
worldwide.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679888/Blog/Hero%20Images/remotedevelopment.jpg","https://about.gitlab.com/blog/remote-development-beta","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Behind the scenes of the Remote Development Beta release\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David O'Regan\"}],\n        \"datePublished\": \"2023-08-16\",\n      }",{"title":2376,"description":2377,"authors":2382,"heroImage":2378,"date":2384,"body":2385,"category":752,"tags":2386},[2383],"David O'Regan","2023-08-16","\nIn May 2023, the Create:IDE team faced an epic challenge – to merge the [Remote Development Rails monolith integration branch](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/105783) into the `master` branch of the GitLab Project. This was no small ask, as the merge request was of considerable size and complexity. In this blog post, we'll delve into the background, justifications, and process behind this endeavor.\n\nThe merge request titled \"Remote Development feature behind a feature flag\" was initiated by the Create:IDE team, aiming to merge the branch \"remote_dev\" into the \"master\" branch in the Rails monolith GitLab project. The MR contained `4` commits, `258` pipelines, and `143` changes that amounted to a total of `+7243` lines of code added to the codebase.\n\nInitially, the MR was created to reflect the work related to \"Remote Development\" under the \"Category: Remote Development.\" It was primarily intended to have CI pipeline coverage for the integration branch and was not meant for individual review or direct merging. 
The plan was to merge this code into the master branch via the [\"Remote Development Beta - Review and merge\" Epic](https://gitlab.com/groups/gitlab-org/-/epics/10258).\n\n![SUM](https://about.gitlab.com/images/blogimages/remote-development/SUM.png){: .shadow.medium}\n\n### How the Remote Development project started\nAs a team, we embarked on an ambitious journey to create a greenfield feature: the [Remote Development](https://docs.gitlab.com/ee/user/project/remote_development/) offering at GitLab. This feature had a vast scope, many unknowns, and required solving numerous new problems. To efficiently tackle this task, we decided to work on an integration branch using a [low-ceremony process](https://stackoverflow.com/questions/68092498/what-does-low-ceremony-mean). This decision enabled us to develop and release the feature in an impressively short time frame of less than four months.\n\nWorking on an integration branch provided us the flexibility to make significant progress, but it was always intended to eventually break down the work into smaller, iterative MRs that would follow the standard [GitLab review process](https://docs.gitlab.com/ee/development/code_review.html). We had a [detailed plan](https://gitlab.com/gitlab-org/remote-development/gitlab-remote-development-docs/-/blob/main/doc/integration-branch-process.md#master-mr-process-summary) for this process, but we realized that following the original plan would not allow us to meet our goal of releasing the feature in GitLab 16.0.\n\n### Merging the integration branch MR without breaking it up\nDuring the development of the Remote Development feature, our team faced several challenges that led us to adopt a new approach for merging the integration branch into the master. 
First, as part of our [velocity-based XP/Scrum style process](https://handbook.gitlab.com/handbook/engineering/devops/dev/create/remote-development/#-remote-development-iteration-planning), we realized that meeting the 16.0 release goal would require us to cut scope. A velocity report, \"[Velocity-based agile planning report](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/118436),\" highlighted that breaking down and reviewing individual MRs would take too long, considering the impending due date and the likelihood of last-minute scope additions.\n\nSecond, we [made the decision](https://gitlab.com/gitlab-org/gitlab/-/issues/398227#note_1361192858) to release workspaces as a **beta feature for public projects** for customers in [GitLab 16.0](/releases/2023/05/22/gitlab-16-0-released/#remote-development-workspaces-available-in-beta-for-public-projects). This approach reduced the complexity of the rollout plan and allowed us to get valuable feedback earlier, but required us to enable the feature by default earlier than planned. To align with this decision, we determined that merging the integration branch after review was the best course of action. An announcement was made to explain the change in plan, and we set specific timelines for the review process to ensure smooth coordination.\n\n> Hello Reviewers/Maintainers 👋 We have opened up a Zoom room through all of next week as an easy sync place for us all to collaborate and triage questions. As the MR is quite large, it might be overwhelming to determine where to begin. To help, we will aim to furnish a summary of what we have included, such as two new database tables and a couple of GraphQL/REST APIs. We will also be available through the week in the Zoom room and without it being too prescriptive of a approach, I would suggest we do a sync walkthrough of the MR first and then kick off the reviews.\n\nAddressing the concerns about risk, team members discussed the challenges and potential solutions. 
While there were apprehensions, we were confident in the overall quality of the feature. A disciplined plan for merging MRs was initially considered, but based on our velocity metrics, it was evident that meeting the public beta release goal required a new strategy.\n\nDespite the deviations from our usual practices, we acknowledged the urgency to deliver the initial release on time. The decision was not taken lightly, and we ensured that the merge had extensive [test coverage](https://docs.gitlab.com/ee/ci/testing/test_coverage_visualization.html) and [feature flags](https://docs.gitlab.com/ee/operations/feature_flags.html) in place to address any potential issues. We accepted that some aspects would be overlooked in the initial MR review cycle, but we committed to addressing them in subsequent iterations.\n\n### Keeping the pipeline green and stable for the merge\nTo ensure the successful merge of the integration branch containing the Remote Development feature, our team made significant efforts to keep the pipeline green and stable. As the MR was quite large and contained critical functionality, it was crucial to maintain a high level of quality and reduce the risk of introducing regressions.\n\nTo address these challenges, the team adopted a disciplined approach to [CI/CD](https://about.gitlab.com/topics/ci-cd/). Throughout the development process, CI pipelines were carefully monitored, and any failing tests or issues were promptly addressed. The team conducted rigorous testing and code reviews to identify and fix potential bugs and ensure that the changes did not negatively impact the existing functionality of the codebase.\n\nAdditionally, extensive test coverage was put in place to ensure that the new feature worked as expected and did not cause unintended side effects. 
The team utilized GitLab's [test coverage visualization](https://docs.gitlab.com/ee/ci/testing/test_coverage_visualization.html) capabilities to track the extent of test coverage and identify areas that required additional testing.\n\n![PIPE](https://about.gitlab.com/images/blogimages/remote-development/PIPE.png){: .shadow.medium}\n\n## The merging process\nAs part of the Remote Development team, we took a strategic approach to the merging process. We identified three categories of follow-up tasks that needed to be addressed after the release:\n\n1. **To-dos:** This category encompassed follow-up issues that required further attention.\n2. **Disabled linting rules:** Any issues related to disabled linting rules were included in this category.\n3. **Follow-up from review:** Non-blocking concerns raised during the review process were categorized here.\n\nTo manage this process effectively, we organized these categories into [child epics](https://docs.gitlab.com/ee/user/group/epics/manage_epics.html#multi-level-child-epics) under the main epic representing the merging effort.\n\n1. Child epic for [to-do follow-up issues](https://gitlab.com/groups/gitlab-org/-/epics/10472)\n2. Child epic for [disabled linting rules follow-up issues](https://gitlab.com/groups/gitlab-org/-/epics/10473)\n3. Child epic for [follow-up issues from review](https://gitlab.com/groups/gitlab-org/-/epics/10474)\n\n\n## Reviewer resources\nDuring the integration branch merge process for the Remote Development feature, we ensured a smooth and collaborative review experience for all involved. To facilitate this, we set up the following resources and documented the information in GitLab's issue, epic, and MR reviews for better persistence and traceability:\n\n1. **Dedicated Slack channel:** We had a Slack channel that served as our primary hub for coordinating reviews and resolving any blockers that arose during the process. 
The discussions, decisions, and important points discussed in this channel were documented in the related GitLab issues and epics. This approach enabled us to maintain a historical record of the conversations to refer back to in the future.\n2. **General Slack channel:** For non-urgent or non-blocking questions and discussions, reviewers could use a general Slack channel. Similar to the dedicated channel, we documented the relevant information from this channel in the corresponding issues and MR reviews in GitLab.\n3. **Addressing urgent issues:** When urgent issues required immediate attention, reviewers could directly address our technical leads [Vishal Tak](https://gitlab.com/vtak) and/or [Chad Woolley](https://gitlab.com/cwoolley-gitlab) in their Slack messages. However, we kindly requested that [direct messages were avoided](https://handbook.gitlab.com/handbook/communication/#avoid-direct-messages) to promote open collaboration. The resolutions to these urgent issues were documented in the corresponding GitLab issues or MR discussions.\n4. **Zoom collaboration room:** The collaborative sessions held in the open Zoom room were not only beneficial for real-time discussions but also for fostering a collaborative environment. After each session, we summarized the key points and decisions made during the meeting in the associated GitLab issue or MR, making sure all important outcomes were captured and accessible to the team.\n\nThroughout the review process, we were committed to maintaining a seamless and well-documented workflow. By capturing all relevant information in GitLab issues, epics, and MR reviews, we ensured that the knowledge was persistently available, and future team members could easily understand the context and decisions made during the integration process.\n\n## Application security review\nDuring the application security review process, we focused on providing a secure and reliable Remote Development feature for our users. 
Here are the key resources and updates related to the application security review:\n\n1. **Main application security review issue:** The main application security review issue served as the central hub for tracking security-related considerations. You can find the defined process we followed [here](https://handbook.gitlab.com/handbook/security/product-security/application-security/appsec-reviews/).\n2. **Application security review comment:** The application security review issue contained a comment indicating that the merge was not blocked unless there were severe issues that could impact production. \"In order to maintain a smooth merge process, we do not block MRs from being merged unless we identify severe issues that could prevent the feature from going into production, such as S1 or S2 level problems. If you are aware of any design flaws or concerns that might qualify as such issues, please bring them to our attention. We can review them together and address any questions or concerns that arise. Let's work collaboratively to find an approach that works for both parties. 👍\"\n3. **Engineering perspective:** For managing the application security review process from an engineering team perspective, we had a dedicated issue, which is kept confidential for security reasons.\n4. **Security and authentication matters:** All security and authentication concerns pertaining to the Beta release were documented within the [`Remote Development Beta -Auth` epic](https://gitlab.com/groups/gitlab-org/-/epics/10377). As of April 30, 2023, we are delighted to announce that **no known issues or obstacles were found that would impede the merge**. This represents a significant accomplishment, considering the intricate nature of this new feature.\n5. **Initial question raised:** During the application security review, one initial question was raised, and we promptly addressed it. 
You can track the issue and our response [here](https://gitlab.com/gitlab-org/gitlab/-/issues/409317).\n\n## Database review\nTo ensure the reliability and efficiency of the Remote Development feature, we sought guidance from the database reviewer. Although the team had not conducted a thorough self-review, we were fully prepared to address any blocking issues raised during the review process. Our references for the review were:\n\n- [Database review documentation](https://docs.gitlab.com/ee/development/database_review.html)\n- [Database reviewer guidelines](https://docs.gitlab.com/ee/development/database/database_reviewer_guidelines.html)\n\nAs an example, during the database migration review, a discussion arose between [Alper Akgun](https://gitlab.com/a_akgun) and Chad, regarding the efficient ordering of columns in the workspaces table. Alper initially suggested placing integer values at the beginning of the table based on relevant documentation.\n\nChad questioned the benefit of this suggestion, pointing out that the specific integer field, `max_hours_before_termination`, would still be padded with empty bytes even if moved to the front, due to its current position between two text fields.\n\nAlper proposed an alternative approach, emphasizing that organizing variable-sized fields (such as `text`, `varchar`, `arrays`, `json`, `jsonb`) at the end of the table could be sufficient for the workspaces table.\n\nUltimately, Chad took the initiative to implement the changes, moving all variable length fields to the end of the table, and documented the discussion as a comment to address review suggestions.\n\nWith this collaborative effort, the workspaces table was efficiently optimized, and the team gained valuable insights into database column ordering strategies.\n\n![DB](https://about.gitlab.com/images/blogimages/remote-development/DB.png){: .shadow.medium}\n\n## Ruby code review\nDuring the Ruby code review phase, we followed a meticulous approach by conducting a 
comprehensive self-review of every line of code. Our goal was to ensure the highest code quality and address any potential issues identified by the reviewers effectively.\n\nTo ensure clarity, it's important to clarify that the Ruby code review primarily focused on backend changes and server-side improvements. This included optimizing performance, enhancing functionalities, and refining the overall codebase to deliver a seamless user experience.\n\nFor the code review process, we referred to the [Code review documentation](https://docs.gitlab.com/ee/development/code_review.html), a valuable resource that guided us in maintaining industry best practices and adhering to the GitLab community's coding standards.\n\n### Example: Enhance error messages for unavailable features\nAs an example during the code review, we addressed an essential aspect of the workspace method, focusing on how we handle scenarios related to the `remote_development_feature_flag` and the `remote_development` licensed feature. The primary objective was to enhance the error messages presented to users when these features are not available.\n\nInitially, the code employed identical error messages for both cases, making it less clear to users whether the issue was due to a missing license or a disabled feature flag. This ambiguity could lead to confusion and hinder the user experience.\n\n#### The suggested improvement\nDuring the review, one of our maintainers, [Peter Leitzen](https://gitlab.com/splattael), raised an important question: \"Are we OK with having only a single error message for both cases (missing license and missing feature flag)?\"\n\nRecognizing the importance of clear communication, Chad proposed enhancing the error messages to provide distinct descriptions for each case. 
This improvement aimed to empower users by precisely conveying the reason behind the unavailability of certain features.\n\n#### The revised implementation\nFollowing Chad's suggestion, the code underwent the following changes:\n\n```ruby\nunless ::Feature.enabled?(:remote_development_feature_flag)\n  # TODO: Could have `included Gitlab::Graphql::Authorize::AuthorizeResource` and then use\n  #       raise_resource_not_available_error!, but didn't want to take the risk to mix that into\n  #       the root query type\n  raise ::Gitlab::Graphql::Errors::ResourceNotAvailable,\n    \"'remote_development_feature_flag' feature flag is disabled\"\nend\n\nunless License.feature_available?(:remote_development)\n  # TODO: Could have `included Gitlab::Graphql::Authorize::AuthorizeResource` and then use\n  #       raise_resource_not_available_error!, but didn't want to take the risk to mix that into\n  #       the root query type\n  raise ::Gitlab::Graphql::Errors::ResourceNotAvailable,\n    \"'remote_development' licensed feature is not available\"\nend\n\nraise_resource_not_available_error!('Feature is not available') unless current_user&.can?(:read_workspace)\n```\n\n#### The value of distinct error messages\nBy implementing distinct and descriptive error messages, we reinforce our commitment to user-centric development. Users interacting with our system will receive accurate feedback, helping them navigate potential roadblocks effectively. This enhancement not only improves the user experience but also streamlines troubleshooting and support processes.\n\nThis code review example highlights the significance of concise and informative error messages in delivering a top-notch user experience within the GitLab ecosystem. 
Our team's collaborative efforts ensure that users can confidently interact with our platform, knowing they'll receive clear and helpful error messages when needed.\n\n![BE1](https://about.gitlab.com/images/blogimages/remote-development/BE1.png){: .shadow.medium}\n\n### Example: Improving performance and addressing N+1 issues in WorkspaceType\nIn a recent code review, our team focused on optimizing the WorkspaceType and addressing potential N+1 query problems. The discussion involved two key contributors, [Laura Montemayor](https://gitlab.com/lauraX) and Chad, who worked together to enhance the performance of the codebase.\n\n#### Identifying the performance concerns\nDuring the review, Laura raised a performance concern regarding the possibility of N+1 queries in the WorkspaceType resolver. She suggested that preloading certain associations could be beneficial to avoid this common performance issue.\n\n#### A separate issue for N+1 control\nChad took prompt action and created a separate issue specifically aimed at resolving the N+1 query problems. The new issue, titled \"Address review feedback: Resolve N+1 issues,\" would address the concerns raised by Laura and implement the necessary preloading.\n\n#### Evaluating the potential N+1 impact\nChad provided insightful information about the low risk of real N+1 impact from two particular fields in the current implementation. He elaborated on how the queries for user and agent associations would largely be cache hits due to scoping and usage patterns. 
Chad diligently examined the cache hits happening in development, confirming the potential optimization.\n\nHere's a code snippet from the initial implementation:\n\n```ruby\n# Initial Implementation\nclass WorkspaceType \u003C BaseType\n  field :user, ::Types::UserType,\n    description: \"User associated with this workspace\",\n    null: true\n\n  field :agent, ::Types::AgentType,\n    description: \"Agent associated with this workspace\",\n    null: true\n\n  # Resolver for the user association\n  def user\n    object.user\n  end\n\n  # Resolver for the agent association\n  def agent\n    object.agent\n  end\nend\n```\n\n#### Treating performance as a priority\nBoth contributors acknowledged the significance of addressing the performance concern, with Laura emphasizing its importance. They agreed to prioritize the separate issue dedicated to resolving the N+1 queries and ensuring proper test coverage.\n\nHere's a code snippet from the revised implementation:\n\n```ruby\n# Revised Implementation with Preloading\nclass WorkspaceType \u003C BaseType\n  field :user, ::Types::UserType,\n    description: \"User associated with this workspace\",\n    null: true\n\n  field :agent, ::Types::AgentType,\n    description: \"Agent associated with this workspace\",\n    null: true\n\n  # Resolver for the user association with preloading\n  def user\n    ::Dataloader.for(::User).load(object.user_id)\n  end\n\n  # Resolver for the agent association with preloading\n  def agent\n    ::Dataloader.for(::Agent).load(object.agent_id)\n  end\nend\n```\n\n#### Considering future usage\nChad expressed excitement about the possibility of the new feature gaining significant usage. 
He humorously stated that encountering enough legitimate traffic on workspaces to trigger any performance impact would be a delightful problem to have, as it would indicate a growing user base.\n\n#### Collaboration and performance improvement\nThe code review exemplifies the collaborative and proactive approach of our team in optimizing the WorkspaceType. The team's dedication to addressing performance concerns ensures that our codebase remains performant and efficient, even as our user base grows.\n\n![BE2](https://about.gitlab.com/images/blogimages/remote-development/BE2.png){: .shadow.medium}\n\n## Frontend code review\nThe frontend code review process was managed by our resident `Create: IDE` frontend maintainers, [Paul Slaughter](https://gitlab.com/pslaughter) and [Enrique Alcátara](https://gitlab.com/ealcantara). Additionally, a significant portion of the new frontend UI code had already undergone separate reviews and was merged to master, contributing to the overall quality of the Remote Development feature.\n\n### Example: Collaborative code improvement for ApolloCache Mutators\nPaul started a thread on an old version of the diff related to `ee/spec/frontend/remote_development/pages/create_spec.js``. The code snippet in question involved creating a mock Apollo instance and writing queries to the cache.\n\n#### The initial implementation\nInitially, the code involved writing to the cache twice, which raised concerns among the maintainers, Paul and Enrique. Paul pointed out that the duplicate write was unintentional and wondered if the writeQuery was even necessary, given the removal of @client directives. However, he also acknowledged the need to test that the created workspace was added to the ApolloCache.\n\n```javascript\n// Initial Implementation\nconst buildMockApollo = () => {\n  // ... 
Other mock setup ...\n\n  // Initial writeQuery for userWorkspacesQuery\n  mockApollo.clients.defaultClient.cache.writeQuery({\n    query: userWorkspacesQuery,\n    data: USER_WORKSPACES_QUERY_EMPTY_RESULT.data,\n  });\n\n  // ... Other mock setup ...\n};\n```\n\n#### Identifying a potential issue\nEnrique agreed that the duplicate write was unintentional and probably introduced during a rebase. He explained that pre-populating the cache with a user workspaces query empty result was essential for the mutator to have a place to add the workspace. However, he encountered difficulties in making the workaround work effectively in unit tests.\n\n#### Resolving the issue\nPaul highlighted the significance of pre-populating the cache with the user workspaces query empty result. He suggested leaving a comment to explain the necessity of the initial writeQuery, as it would be implicitly coupled to future writeQuery operations.\n\n```javascript\n// Resolving the Issue - Leaving a Comment\n// Pre-populate the cache with user workspaces query empty result to provide a place\n// for the mutator to add the Workspace later. This is needed for both test and production environments.\nmockApollo.clients.defaultClient.cache.writeQuery({\n  query: userWorkspacesQuery,\n  data: USER_WORKSPACES_QUERY_EMPTY_RESULT.data,\n});\n```\n\nHowever, upon further investigation, Paul discovered that the writeQuery might not be needed, and the issue might be a symptom of an underlying problem. 
He decided to open a separate thread to address this concern and indicated that he would work on a separate MR to handle it.\n\n```javascript\n// Resolving the Issue - Opening a Separate Thread and MR\n// Open a separate thread to discuss potential underlying issues.\n// Plan to work on a separate MR to handle it.\n// Stay tuned for updates!\n```\n\n![FE](https://about.gitlab.com/images/blogimages/remote-development/FE.png){: .shadow.medium}\n\n## What we learned\nAs part of the Remote Development team, we faced the challenge of merging the Remote Development Rails monolith integration branch to meet our ambitious release goal. We adapted to last-minute pivots and focused on minimizing risks during the review process. The successful merge brought us one step closer to benefiting GitLab users worldwide. We acknowledged areas for improvement and remained committed to refining the feature's quality. Our journey reflects our dedication to delivering results, embracing change, and pushing boundaries in the DevOps community. The release of the Remote Development feature in GitLab 16.0 is a significant milestone for GitLab, and we continue to iterate and grow, providing innovative solutions for developers worldwide.\n\nAn outcome of this process was an ongoing conversation to propose a [simplified review process for greenfield features](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/125117). Through this proposal, we aim to distill the lessons we learned during this experience and provide guidance to future teams facing similar challenges.\n\n## What is next for Remote Development?\nAfter the merge of the MR, several changes were implemented:\n- The first production tests were conducted to ensure the stability and functionality of the merged code.\n- Collaboration took place between the Dev Evangelism and Technical Marketing teams, focusing on [creating content](https://gitlab.com/groups/gitlab-com/marketing/developer-relations/-/epics/190). 
This collaboration aimed to troubleshoot any issues that arose during the merge.\n- Feedback from the community was taken into account, and changes were made to address the concerns raised. This feedback was incorporated into an [issue](https://gitlab.com/gitlab-org/gitlab/-/issues/410031) and influenced the overall roadmap and direction of the project.\n\nDo you want to [contribute to GitLab](/community/contribute/)? Come and join in the conversation in the `#contribute` channel on GitLab's [Discord](https://discord.gg/gitlab), or just pop in and say \"Hi.\"\n\n",[1146,573,1429,9,1430,731],{"slug":2388,"featured":6,"template":734},"remote-development-beta","content:en-us:blog:remote-development-beta.yml","Remote Development Beta","en-us/blog/remote-development-beta.yml","en-us/blog/remote-development-beta",{"_path":2394,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2395,"content":2400,"config":2405,"_id":2407,"_type":14,"title":2408,"_source":16,"_file":2409,"_stem":2410,"_extension":19},"/en-us/blog/running-a-consistent-serverless-platform",{"title":2396,"description":2397,"ogTitle":2396,"ogDescription":2397,"noIndex":6,"ogImage":1625,"ogUrl":2398,"ogSiteName":720,"ogType":721,"canonicalUrls":2398,"schema":2399},"Run a consistent serverless platform with GitLab and Knative","Portability of your serverless platform is now easy with GitLab and Knative.","https://about.gitlab.com/blog/running-a-consistent-serverless-platform","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Run a consistent serverless platform with GitLab and Knative\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Daniel Gruesso\"}],\n        \"datePublished\": \"2019-05-02\",\n      }",{"title":2396,"description":2397,"authors":2401,"heroImage":1625,"date":1532,"body":2403,"category":300,"tags":2404},[2402],"Daniel Gruesso","\nThis past April, [Cloud Run](https://cloud.google.com/run/) was 
announced at Google Cloud Next. As a Google Cloud partner, GitLab had the opportunity to participate and demo our integration during the talk titled, \"[Run a consistent serverless platform anywhere with Kubernetes and Knative](https://youtu.be/lb_bRRAgEyc?t=1100).\"\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/lb_bRRAgEyc?start=1100\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nJust as Kubernetes has become the de facto default platform for running containers, Knative is shaping up to become the answer for running [serverless](/topics/serverless/) workloads in Kubernetes. Cloud Run brings all the benefits of Knative in a fully managed service or as an add-on to your Kubernetes cluster (called “Cloud Run on GKE”), abstracting developers from the complexities of deploying Kubernetes, Knative, and managing a cluster. This empowers developers to focus on adding value vs having to deploy and manage infrastructure.\n\nAt GitLab we believe in the power of open source and adopted Kubernetes and Knative from early on. During the talk, we demoed how GitLab enables operators to deploy Knative with ease so that developers can start deploying Functions-as-a-service (FaaS) or serverless applications using GitLab’s built-in features. GitLab also provides the configured Istio-Ingress endpoints automatically, which operators can then use to configure DNS for their domain, as well as providing the option to bind the domain to the ingress endpoint (via ConfigMap) so that the serving controller can configure the routes. 
This is all done with a single click.\n\nAfter provisioning your project with the required [serverless templates](https://docs.gitlab.com/ee/update/removals.html), GitLab will automatically build and deploy your application or function as a Knative service, provide you with the endpoint where the service is provisioned, and display load/invocation metrics for your function.\n\n![GitLab Serverless](https://docs.gitlab.com/ee/update/removals.html){: .shadow.small.center.wrap-text}\n\nWhile it’s still early on, we are very excited to partner with both Google Cloud and the Knative community to bring all this awesome functionality to the GitLab community.\n\n{::options parse_block_html=\"true\" /}\n\n\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>&nbsp;&nbsp;\nLearn more about [GitLab Serverless](https://docs.gitlab.com/ee/user/project/clusters/serverless)\n&nbsp;&nbsp;\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>\n{: .alert .alert-webcast}\n\n{::options parse_block_html=\"true\" /}\n\n\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>&nbsp;&nbsp;\nLearn more about [Cloud Run](http://cloud.google.com/run)\n&nbsp;&nbsp;\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>\n{: .alert .alert-webcast}\n",[9,278,1229,1147,859],{"slug":2406,"featured":6,"template":734},"running-a-consistent-serverless-platform","content:en-us:blog:running-a-consistent-serverless-platform.yml","Running A Consistent Serverless 
Platform","en-us/blog/running-a-consistent-serverless-platform.yml","en-us/blog/running-a-consistent-serverless-platform",{"_path":2412,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2413,"content":2419,"config":2424,"_id":2426,"_type":14,"title":2427,"_source":16,"_file":2428,"_stem":2429,"_extension":19},"/en-us/blog/secure-containers-devops",{"title":2414,"description":2415,"ogTitle":2414,"ogDescription":2415,"noIndex":6,"ogImage":2416,"ogUrl":2417,"ogSiteName":720,"ogType":721,"canonicalUrls":2417,"schema":2418},"A shift left strategy for the cloud","Protect your software in the cloud by bringing vulnerability testing closer to remediation.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670146/Blog/Hero%20Images/containers-for-five-things-kubernetes-blog-post.jpg","https://about.gitlab.com/blog/secure-containers-devops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A shift left strategy for the cloud\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cindy Blake\"},{\"@type\":\"Person\",\"name\":\"Vanessa Wegner\"}],\n        \"datePublished\": \"2019-05-03\",\n      }",{"title":2414,"description":2415,"authors":2420,"heroImage":2416,"date":2421,"body":2422,"category":815,"tags":2423},[1471,2117],"2019-05-03","\n\nBusinesses continually adopt new technologies to become more efficient and\neffective. This move toward efficiency in IT has brought a “shift left” to\n[application security](/topics/devsecops/) testing. Methodologies like DevOps and Agile work with iterative\nand [MVP](https://www.agilealliance.org/glossary/mvp/) states, meaning that apps are constantly updating and constantly need\ntesting and retesting – sometimes daily or multiple times per day.\n\n[Serverless](/topics/serverless/), cloud native, containers, and Kubernetes are changing how apps are\ndeployed and managed. 
This has expanded the attack surface in the form of new\nlayers of complexity and more settings and updates to manage, which also means\nmore room for manual error. In a container, this includes the image, registry,\nand east-west traffic, while in Kubernetes, this includes access and\nauthentication, runtime resources, and network policies. Traffic between apps\nin a container does not cross perimeter network security, but should still be\nmonitored for malicious traffic between apps and the resources they use.\n\n## Your cloud-based ecosystem doesn’t provide comprehensive security\n\nCloud providers, orchestrators, and other partners don’t provide a full\nspectrum of security capabilities out of the box – even with their help, your\nteam must create and maintain their own security policies and continuously\nmonitor your ecosystem for any unusual or malicious activity. While network\nsegmentation and perimeter security for your guest VMs or containers might be\navailable, your engineer will typically need to configure that.\n\nThe figure below outlines the responsibilities of cloud providers, security\nvendors, and end-users, across apps, hosts, networks, and foundation services.\nThe responsibilities in purple and orange are _primarily_ the responsibility of\nthe cloud provider and security vendors, but our engineers tell us that they\nare involved in every cell of this chart in some way.\n\n![Security responsibilities in your cloud ecosystem](https://about.gitlab.com/images/blogimages/container-security-responsibilities.png){: .shadow.medium.center}\n\n## Treat security as a critical outcome, not a department\n\nSecurity should be top of mind for everyone in the business, not just your\nsecurity team. While the complexity of your infrastructure builds, new tools\nand capabilities give opportunity for everyone to contribute to the security\neffort. 
Here are a few areas of change that will help you rally the masses in\ndefense of your business:\n- Cloud providers are beginning to offer more security capabilities.\n- System updates – and staying current with your patches – could very much save the day.\n- Automating your processes could make or break the business. While guidelines\nfor humans are necessary, you need automation to abstract the complexity of\nyour infrastructure. Soon, automated capabilities to translate plain-language\npolicies into the growing multitude of settings will make their way into the\nmarket.\n\n### Take a Zero Trust approach to your applications\n\nThe foundational idea of [Zero Trust](/blog/evolution-of-zero-trust/) is simple: Trust nothing and always assume\nthe bad guys are trying to get in. It’s time to take your security beyond the\ntraditional network-perimeter approach and extend Zero Trust from data,\nnetwork, and endpoints to your application infrastructure. It also wouldn’t\nhurt to protect the software development lifecycle (SDLC) to ensure the integrity of your software is not\ncompromised, given all of the automation in a typical DevOps toolchain.\n\n## Three key principles to secure next-generation IT\n\n### 1. Enhance your security practices with DevSecOps\n\nAs you iterate on software, dovetail security into each iteration through [DevSecOps](/solutions/application-security-testing/) – not simply\nto test security for the entire history of the app, but to test the impact of\neach change made in every update. Retrofitting your apps and software for\nsecure functionality will slow down your release cycle. Marrying the two\nwill save both time in the present, and heartache in the future when\nyour software is inevitably attacked. Unfortunately, traditional methods don’t\nfit the bill when it comes to DevOps; it’s too expensive and too robust to\nscan every piece of code manually. 
With a [shift left](/topics/ci-cd/shift-left-devops/) strategy, security scans can be automated into every\ncode commit – meaning you no longer need to choose between risk, cost, and\nagility.\n\n[Arm your developers to resolve vulnerabilities early in the SDLC, leaving your\nsecurity team free to focus on exceptions](/blog/speed-secure-software-delivery-devsecops/).\nWith GitLab, a [review app](https://docs.gitlab.com/ee/ci/review_apps/) is spun up at code commit – before the\nindividual developer’s code is merged to the master. The developer can see and\ntest the working application, with test results highlighting the impact of the\ncode change. [Dynamic application security testing](https://docs.gitlab.com/ee/user/application_security/dast/) (DAST)\ncan then scan the review app, and the developer can quickly iterate to resolve\nvulnerabilities reported in their pipeline report.\n\n![View dynamic application security testing within GitLab.](https://about.gitlab.com/images/blogimages/dast-example.png){: .shadow.medium.center}\n[Learn more about DAST in GitLab's product documentation.](https://docs.gitlab.com/ee/user/application_security/dast/)\n\n### 2. Secure horizontally before digging deeper\n\nWe often fall into the trap of going deep on a single aspect of security –\nleaving other obvious aspects completely exposed. For instance, you may\nuse a powerful scanner for your mission-critical apps but neglect to scan\nothers; or, you may choose to save resources by not scanning your third-party\ncode, with the assumption that its widespread use means it’s checked out.\n\nAvoid focusing so much on application security that you forget about container\nscanning, orchestrators, and access management.\n\n### 3. Simplicity and integration wins\n\nThe key is to bring security scanning to the development process by having a\ntool like GitLab that allows developers to stay within the same platform or\ninterface to both code and scan. 
Making the process easier increases the\nlikelihood that it’ll get done – and making the process automatic within the\ntool ensures that it will happen every time there is a code update.\n\nReady to deliver secure apps with every update? [Just commit.](/solutions/application-security-testing/)\n{: .alert .alert-gitlab-purple .text-center}\n\nCover image by [Frank McKenna](https://unsplash.com/@frankiefoto) on [Unsplash](https://unsplash.com/photos/tjX_sniNzgQ?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[9,563,859,979],{"slug":2425,"featured":6,"template":734},"secure-containers-devops","content:en-us:blog:secure-containers-devops.yml","Secure Containers Devops","en-us/blog/secure-containers-devops.yml","en-us/blog/secure-containers-devops",{"_path":2431,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2432,"content":2437,"config":2443,"_id":2445,"_type":14,"title":2446,"_source":16,"_file":2447,"_stem":2448,"_extension":19},"/en-us/blog/serverless-js-project-template",{"title":2433,"description":2434,"ogTitle":2433,"ogDescription":2434,"noIndex":6,"ogImage":970,"ogUrl":2435,"ogSiteName":720,"ogType":721,"canonicalUrls":2435,"schema":2436},"Starting a serverless JS project with GitLab","Introduction to the new serverless JS project template at GitLab","https://about.gitlab.com/blog/serverless-js-project-template","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Starting a serverless JS project with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mike Greiling\"}],\n        \"datePublished\": \"2020-01-14\",\n      }",{"title":2433,"description":2434,"authors":2438,"heroImage":970,"date":2440,"body":2441,"category":1122,"tags":2442},[2439],"Mike Greiling","2020-01-14","{::options parse_block_html=\"true\" /}\n\n\n\n\n\u003C!-- Content start here -->\n\n\nIf you've been working in web development these past few years than 
you've\nno doubt heard about [serverless](/topics/serverless/) FaaS solutions like\nAWS Lambda or Knative. The idea boils down to writing code as a set of\ndiscrete functions that can be triggered by events. All worries about\nprovisioning server nodes, scaling them, managing your back-end stack, and\nmany other operational tasks are abstracted away. Moreover, it often results\nin massive cost savings as compute resources are provisioned on-demand. As\nthis paradigm is growing in maturity and popularity, many tools have been\ncreated to make the process even easier and there has never been a better\ntime to try it out for yourself.\n\n\nTo demonstrate how easy it is to get started with FaaS in GitLab, we've now\nadded a project template to get you up and running even faster. If you're\ninterested in wading into the serverless waters without running a single\ncommand in your terminal, follow along and try it yourself! All that is\nneeded for this example is a free GitLab account and an AWS account.\n\n\n### 1. Creating a project\n\n\nTo start off, let's create a project with our new serverless template. Open\nup the [new project](https://gitlab.com/projects/new) page and select the\n\"Create from template\" tab. Then scroll down and select the \"Serverless\nFramework/JS\" template.\n\n\n![Step\n1](https://about.gitlab.com/images/blogimages/serverless-js-project-template/step-1.1.jpg){:\n.shadow.medium.center}\n\n\nGive your project a name and select \"Create project\"\n\n\n### 2. Configuring your AWS credentials\n\n\nNow that we have our GitLab project complete with a boilerplate serverless\napplication, it's time to give it access credentials to AWS so we can deploy\nit.\n\n\nOpen up the AWS console, sign in, and navigate to the [IAM\nsection](https://console.aws.amazon.com/iam/home). 
Here you can select\n\"Users\" in the left-hand column, and create a new user using the \"Add user\"\nbutton at the top of the list.\n\n\n![Step\n2-1](https://about.gitlab.com/images/blogimages/serverless-js-project-template/step-2.1.jpg){:\n.shadow.medium.center}\n\n\nGive your user a name like \"gitlab-serverless\" and make sure to select the\n\"Programatic access\" checkbox before clicking on \"Next\".\n\n\n![Step\n2-2](https://about.gitlab.com/images/blogimages/serverless-js-project-template/step-2.2.jpg){:\n.shadow.medium.center}\n\n\nNow we need to give this user the appropriate permissions to deploy\nserverless functions. On the \"Permissions\" page select \"Attach existing\npolicies directly\" and then click the \"Create policy\" button. This will open\na new window.\n\n\n![Step\n2-3](https://about.gitlab.com/images/blogimages/serverless-js-project-template/step-2.3.jpg){:\n.shadow.medium.center}\n\n\nHere you'll need to select the \"JSON\" tab and paste the following policy\nstatement:\n\n\n```json\n\n{\n  \"Statement\": [\n    {\n      \"Action\": [\n        \"apigateway:*\",\n        \"cloudformation:CancelUpdateStack\",\n        \"cloudformation:ContinueUpdateRollback\",\n        \"cloudformation:CreateChangeSet\",\n        \"cloudformation:CreateStack\",\n        \"cloudformation:CreateUploadBucket\",\n        \"cloudformation:DeleteStack\",\n        \"cloudformation:Describe*\",\n        \"cloudformation:EstimateTemplateCost\",\n        \"cloudformation:ExecuteChangeSet\",\n        \"cloudformation:Get*\",\n        \"cloudformation:List*\",\n        \"cloudformation:PreviewStackUpdate\",\n        \"cloudformation:UpdateStack\",\n        \"cloudformation:UpdateTerminationProtection\",\n        \"cloudformation:ValidateTemplate\",\n        \"dynamodb:CreateTable\",\n        \"dynamodb:DeleteTable\",\n        \"dynamodb:DescribeTable\",\n        \"ec2:AttachInternetGateway\",\n        \"ec2:AuthorizeSecurityGroupIngress\",\n        
\"ec2:CreateInternetGateway\",\n        \"ec2:CreateNetworkAcl\",\n        \"ec2:CreateNetworkAclEntry\",\n        \"ec2:CreateRouteTable\",\n        \"ec2:CreateSecurityGroup\",\n        \"ec2:CreateSubnet\",\n        \"ec2:CreateTags\",\n        \"ec2:CreateVpc\",\n        \"ec2:DeleteInternetGateway\",\n        \"ec2:DeleteNetworkAcl\",\n        \"ec2:DeleteNetworkAclEntry\",\n        \"ec2:DeleteRouteTable\",\n        \"ec2:DeleteSecurityGroup\",\n        \"ec2:DeleteSubnet\",\n        \"ec2:DeleteVpc\",\n        \"ec2:Describe*\",\n        \"ec2:DetachInternetGateway\",\n        \"ec2:ModifyVpcAttribute\",\n        \"events:DeleteRule\",\n        \"events:DescribeRule\",\n        \"events:ListRuleNamesByTarget\",\n        \"events:ListRules\",\n        \"events:ListTargetsByRule\",\n        \"events:PutRule\",\n        \"events:PutTargets\",\n        \"events:RemoveTargets\",\n        \"iam:CreateRole\",\n        \"iam:DeleteRole\",\n        \"iam:DeleteRolePolicy\",\n        \"iam:GetRole\",\n        \"iam:PassRole\",\n        \"iam:PutRolePolicy\",\n        \"iot:CreateTopicRule\",\n        \"iot:DeleteTopicRule\",\n        \"iot:DisableTopicRule\",\n        \"iot:EnableTopicRule\",\n        \"iot:ReplaceTopicRule\",\n        \"kinesis:CreateStream\",\n        \"kinesis:DeleteStream\",\n        \"kinesis:DescribeStream\",\n        \"lambda:*\",\n        \"logs:CreateLogGroup\",\n        \"logs:DeleteLogGroup\",\n        \"logs:DescribeLogGroups\",\n        \"logs:DescribeLogStreams\",\n        \"logs:FilterLogEvents\",\n        \"logs:GetLogEvents\",\n        \"s3:CreateBucket\",\n        \"s3:DeleteBucket\",\n        \"s3:DeleteBucketPolicy\",\n        \"s3:DeleteObject\",\n        \"s3:DeleteObjectVersion\",\n        \"s3:GetObject\",\n        \"s3:GetObjectVersion\",\n        \"s3:ListAllMyBuckets\",\n        \"s3:ListBucket\",\n        \"s3:PutBucketNotification\",\n        \"s3:PutBucketPolicy\",\n        \"s3:PutBucketTagging\",\n        
\"s3:PutBucketWebsite\",\n        \"s3:PutEncryptionConfiguration\",\n        \"s3:PutObject\",\n        \"sns:CreateTopic\",\n        \"sns:DeleteTopic\",\n        \"sns:GetSubscriptionAttributes\",\n        \"sns:GetTopicAttributes\",\n        \"sns:ListSubscriptions\",\n        \"sns:ListSubscriptionsByTopic\",\n        \"sns:ListTopics\",\n        \"sns:SetSubscriptionAttributes\",\n        \"sns:SetTopicAttributes\",\n        \"sns:Subscribe\",\n        \"sns:Unsubscribe\",\n        \"states:CreateStateMachine\",\n        \"states:DeleteStateMachine\"\n      ],\n      \"Effect\": \"Allow\",\n      \"Resource\": \"*\"\n    }\n  ],\n  \"Version\": \"2012-10-17\"\n}\n\n```\n\n\n> Note: This policy is an example that encompasses pretty much everything\nthe Serverless framework _might_ need on AWS, but much of it not likely to\nbe used. You may wish to restrict this policy to fit your needs and security\nrequirements. At minimum, the serverless credentials will need access to the\n`cloudformation`, `iam`, `lambda`, `logs`, and `s3` functions specified\nabove.\n\n\n![Step\n2-4](https://about.gitlab.com/images/blogimages/serverless-js-project-template/step-2.4.jpg){:\n.shadow.medium.center}\n\n\nClick \"Review Policy\" and you'll need to give this policy a name. I've used\n\"GitLabServerlessPolicy\". Then click \"Create policy\".\n\n\nAfter this is done, return to your \"Add user\" tab and search for the policy\nyou just created (you may need to hit the \"refresh\" icon on the right).\nCheck the box next to this policy and select the \"Next\" button.\n\n\n![Step\n2-5](https://about.gitlab.com/images/blogimages/serverless-js-project-template/step-2.5.jpg){:\n.shadow.medium.center}\n\n\nProceed to add tags or skip straight to the review. 
The final page should\nresemble the following:\n\n\n![Step\n2-6](https://about.gitlab.com/images/blogimages/serverless-js-project-template/step-2.6.jpg){:\n.shadow.medium.center}\n\n\nAfter clicking \"Create user\" you should finally be presented with a page\nthat shows you your access credentials for this new AWS user account. Select\n\"show\" next to the \"secret access key\" and copy both this and the access key\nID someplace for safe keeping.\n\n\n### 3. Entering your AWS credentials\n\n\nReturning back to GitLab, we'll need to enter these two credentials into our\nproject's [CI/CD settings](/topics/ci-cd/). Select \"Settings -> CI/CD\" in\nthe left-hand menu.\n\n\n![Step\n3-1](https://about.gitlab.com/images/blogimages/serverless-js-project-template/step-3.1.jpg){:\n.shadow.small.center}\n\n\nOn this page, we need to expand the Variables section and enter our AWS\ncredentials:\n\n\n![Step\n3-2](https://about.gitlab.com/images/blogimages/serverless-js-project-template/step-3.2.jpg){:\n.shadow.medium.center}\n\n\nUse `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` as the keys for the two\nvalues you copied from AWS in the previous step. Don't forget to click \"Save\nvariables\".\n\n\n### 4. Deploying your first AWS Lambda function.\n\n\nNow it's time to deploy your serverless project. If you're doing this on\ngitlab.com you've already got access to a GitLab runner with 2,000 free CI\npipeline minutes, if not you'll need to [configure a runner\nyourself](https://docs.gitlab.com/runner/#install-gitlab-runner).\n\n\nGo to \"CI/CD -> Pipelines\" in the left-hand menu and click the \"Run\nPipeline\" button. For fun, let's enter an environment variable with the key\n`A_VARIABLE` and give it whatever value you want. This will be usable by our\ndeployed function.\n\n\n![Step\n4-1](https://about.gitlab.com/images/blogimages/serverless-js-project-template/step-4.1.jpg){:\n.shadow.medium.center}\n\n\nSelect \"Run Pipeline\" and you should see your jobs start running. 
This\nproject template has tests which will automatically run every time you run a\npipeline. Once those are complete, the \"production\" job will deploy your\ncode to AWS Lambda and finally it will produce a landing page on [GitLab\nPages](https://docs.gitlab.com/ee/user/project/pages/). After just\na few minutes this process should complete and you can visit \"Settings ->\nPages\" to see a link to the URL where your GitLab project has been deployed.\n(It may take a few minutes before this URL is accessible the first time you\nmake a deployment).\n\n\n![Step\n4-2](https://about.gitlab.com/images/blogimages/serverless-js-project-template/step-4.2.jpg){:\n.shadow.medium.center}\n\n\nWhen you visit this page, here's what you'll see:\n\n\n![Step 4\nResult](https://about.gitlab.com/images/blogimages/serverless-js-project-template/step-4.3.gif){:\n.shadow.medium.center}\n\n\nYou can enter an input value and click \"run function\". This input is sent to\nyour serverless function which then responds and the response is printed\nunder \"Function Output:\". Note that the environment value we provided using\nthe `A_VARIABLE` key is present as well.\n\n\n### 5. Making Changes\n\n\nNow that we have a working AWS serverless project, let's try to make our own\nfunction. 
How about a simple calculator?\n\n\nOpen up the Web IDE and let's make the following changes:\n\n\nWithin `src/handler.js` add the following function:\n\n\n```javascript\n\nmodule.exports.add = async function(event) {\n  const A = Number(event.queryStringParameters.A);\n  const B = Number(event.queryStringParameters.B);\n  const result = A + B;\n\n  return {\n    statusCode: 200,\n    headers: {\n      \"Access-Control-Allow-Origin\": \"*\"\n    },\n    body: result\n  };\n};\n\n```\n\n\nThen open `public/index.html` and replace it with:\n\n\n```html\n\n\u003C!DOCTYPE html>\n\n\u003Chtml>\n  \u003Chead>\n    \u003Ctitle>GitLab Serverless Framework example\u003C/title>\n  \u003C/head>\n  \u003Cbody>\n    \u003Ch3>Add two values:\u003C/h3>\n    \u003Clabel>A: \u003Cinput type=\"text\" id=\"inputA\" placeholder=\"0\" name=\"A\"/>\u003C/label>\n    \u003Clabel>B: \u003Cinput type=\"text\" id=\"inputB\" placeholder=\"0\" name=\"B\"/>\u003C/label>\n    \u003Cstrong>=\u003C/strong>\n    \u003Cspan id=\"functionOutput\">?\u003C/span>\n    \u003Cbr />\n    \u003Cbutton>Calculate!\u003C/button>\n\n    \u003Cscript>\n      fetch(\"./stack.json\").then(response => {\n        response.json().then(myJson => {\n          const functionUrl = myJson.ServiceEndpoint + \"/add\";\n          const inputA = document.querySelector(\"#inputA\");\n          const inputB = document.querySelector(\"#inputB\");\n          const output = document.querySelector(\"#functionOutput\");\n\n          document.querySelector(\"button\").addEventListener(\"click\", () => {\n            const A = Number(inputA.value);\n            const B = Number(inputB.value);\n\n            fetch(functionUrl + \"?A=\" + A + \"&B=\" + B)\n              .then(response => response.text())\n              .then(result => (output.textContent = result));\n          });\n        });\n      });\n    \u003C/script>\n  \u003C/body>\n\u003C/html>\n\n```\n\n\nLastly, open `serverless.yml` and add an \"add\" function entry below 
our\n\"hello\" function like so:\n\n\n```yml\n\nfunctions:\n  hello:\n    handler: src/handler.hello\n    events:\n      - http:\n          path: hello\n          method: get\n          cors: true\n  add:\n    handler: src/handler.add\n    events:\n      - http:\n          path: add\n          method: get\n          cors: true\n```\n\n\nStage and commit these changes directly to the `master` branch. This will\nhave triggered a new pipeline automatically. You can visit \"CI/CD ->\nPipelines\" and watch it run.\n\n\nOnce the deployment is complete, our project page should look like this:\n\n\n![Step 5\nResult](https://about.gitlab.com/images/blogimages/serverless-js-project-template/step-5.1.gif){:\n.shadow.medium.center}\n\n\nVoilà, we've just created our own serverless function and deployed it\nwithout a single terminal command. There's a lot more you can do from here,\nbut this should be a good place to get started. Happy coding!\n\n\n\u003C!-- Content ends here -->\n\n\nCover image by [Kaushik Panchal](https://unsplash.com/@kaushikpanchal) on\n[Unsplash](https://unsplash.com/)\n\n{: .note}\n",[232,796,9],{"slug":2444,"featured":6,"template":734},"serverless-js-project-template","content:en-us:blog:serverless-js-project-template.yml","Serverless Js Project Template","en-us/blog/serverless-js-project-template.yml","en-us/blog/serverless-js-project-template",{"_path":2450,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2451,"content":2457,"config":2463,"_id":2465,"_type":14,"title":2466,"_source":16,"_file":2467,"_stem":2468,"_extension":19},"/en-us/blog/set-up-infrastructure-for-cloud-development-environments",{"title":2452,"description":2453,"ogTitle":2452,"ogDescription":2453,"noIndex":6,"ogImage":2454,"ogUrl":2455,"ogSiteName":720,"ogType":721,"canonicalUrls":2455,"schema":2456},"Cloud infrastructure for on-demand development in GitLab","Learn how to set up the requirements, manage Kubernetes clusters in different clouds, create the first workspaces and 
custom images, and get tips and troubleshooting.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659883/Blog/Hero%20Images/post-cover-image.jpg","https://about.gitlab.com/blog/set-up-infrastructure-for-cloud-development-environments","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Set up your infrastructure for on-demand, cloud-based development environments in GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2023-07-13\",\n      }",{"title":2458,"description":2453,"authors":2459,"heroImage":2454,"date":2460,"body":2461,"category":752,"tags":2462},"Set up your infrastructure for on-demand, cloud-based development environments in GitLab",[1099],"2023-07-13","Cloud-based development environments enable a better developer onboarding experience and help make teams more efficient. In this tutorial, you'll learn how to ready your infrastructure for on-demand, cloud-based development environments. You'll also learn how to set up the requirements, manage Kubernetes clusters in different clouds, create your first workspaces and custom images, and get tips for troubleshooting.\n\nThe GitLab agent for Kubernetes, an OAuth GitLab app, and a proxy pod deployment make the setup reproducible in different Kubernetes cluster environments and follow cloud-native best practices. Bringing your infrastructure allows platform teams to store the workspace data securely, control resource usage, harden security, and troubleshoot the deployments in known ways.\n\nThis blog post is a long read so feel free to navigate to the sections of interest. 
However, if you want to follow the tutorial step by step, the sections depend on one another for the parts pertaining to infrastructure setup.\n\n- [Development environments on your infrastructure](#development-environments-on-your-infrastructure)\n- [Requirements](#requirements)\n    - [Workspaces domain](#workspaces-domain)\n    - [TLS certificates](#tls-certificates)\n- [GitLab OAuth application ](#gitlab-oauth-application)\n- [Kubernetes cluster setup](#kubernetes-cluster-setup)\n    - [Set up infrastructure with Google Kubernetes Engine (GKE)](#set-up-infrastructure-with-google-kubernetes-engine=gke)\n    - [Set up infrastructure with Amazon Elastic Kubernetes Service (EKS)](#set-up-infrastructure-with-amazon-elastic-kubernetes-service-eks)\n    - [Set up infrastructure with Azure Managed Kubernetes Service (AKS)](#set-up-infrastructure-with-azure-managed-kubernetes-service-aks)\n    - [Set up infrastructure with Civo Cloud Kubernetes](#set-up-infrastructure-with-civo-cloud-kubernetes)\n    - [Set up infrastructure with self-managed Kubernetes](#set-up-infrastructure-with-self-managed-kubernetes)\n- [Workspaces proxy installation into Kubernetes](#workspaces-proxy-installation-into-kubernetes)\n- [Agent for Kubernetes installation](#agent-for-kubernetes-installation)\n- [Workspaces creation](#workspaces-creation)\n    - [Create the first workspaces](#create-the-first-workspaces)\n    - [Custom workspace container images](#custom-workspace-container-images)\n- [Tips](#tips)\n    - [Certificate management](#certificate-management)\n    - [Troubleshooting](#troubleshooting)\n    - [Contribute](#contribute)\n- [Share your feedback](#share-your-feedback)\n\n## Development environments on your infrastructure\nSecure, on-demand, cloud-based development workspaces are [available in beta for public projects](/blog/introducing-workspaces-beta/) for Premium and Ultimate customers. The first iteration allows you to bring your own infrastructure as a Kubernetes cluster. 
GitLab already deeply integrates with Kubernetes through the GitLab agent for Kubernetes, setting the foundation for configuration and cluster management.\n\nUsers can define and use a development environment template in a project. Workspaces in GitLab support the [devfile specification](https://docs.gitlab.com/ee/user/workspace/#devfile) as `.devfile.yaml` in the project repository root. The devfile attributes allow configuring of the workspace. For example, the `image` attribute specifies the container image to run and create the workspace in isolated container environments. The containers require a cluster orchestrator, such as Kubernetes, that manages resource usage and ensures data security and safety. Workspaces also need authorization: Project source code may contain sensitive intellectual property or otherwise confidential data in specific environments. The setup requires a GitLab OAuth application as the foundation here.\n\nThe following steps provide an in-depth setup guide for different cloud providers. If you prefer to set up your own environment, please follow the [documentation for workspace prerequisites](https://docs.gitlab.com/ee/user/workspace/#prerequisites). In general, we will practice the following steps:\n0. (Optional) Register a workspaces domain, and create TLS certificates.\n1. Create a Kubernetes cluster and configure access and requirements.\n2. Install an Ingress controller.\n3. Set up the workspaces proxy with the domain, TLS certificates, and OAuth app.\n4. Create a new GitLab group with a GitLab agent project. The agent can be used for all projects in that group.\n5. Install the GitLab agent for Kubernetes using the UI provided Helm chart command.\n6. Create an example project with a devfile configuration for workspaces.\n\nSome commands do not use the terminal indicator (`$` or `#`) to support easier copy-paste of command blocks into terminals.\n\n## Requirements\nThe steps in this blog post require the following CLI tools:\n1. 
`kubectl` and `helm` for Kubernetes\n2. `certbot` for Let's Encrypt\n3. git, curl, dig, openssl, and sslscan for troubleshooting\n\n### Workspaces domain\nWorkspaces require a domain with DNS entries. Cloud providers, for example, Google Cloud, also provide domain services which integrate more easily. You can also register and manage domains with your preferred provider.\n\nThe required DNS entries will be:\n- Wildcard DNS (`*.remote-dev.dev`) and hostname (`remote-dev.dev`) A/AAAA records pointing to the external Kubernetes external IP: `kubectl get services -A`\n- (Optional, with Let's Encrypt) ACME DNS challenge entries as TXT records\n\nAfter acquiring a domain, wait until the Kubernetes setup is ready and extract the A/AAAA records for the DNS settings. The following example shows how `remote-dev.dev` is configured in the Google Cloud DNS service.\n\n![GitLab remote development workspaces, example DNS configuration for remote-dev.dev](https://about.gitlab.com/images/blogimages/infrastructure-cloud-development-environments/gitlab_remote_dev_workspaces_google_cloud_dns_remote-dev.dev-entries.png){: .shadow}\n\nExport shell variables that define the workspaces domains, and the email contact. These variables will be used in all setup steps below.\n\n```\nexport EMAIL=\"user@company.com\"\nexport GITLAB_WORKSPACES_PROXY_DOMAIN=\"remote-dev.dev\"\nexport GITLAB_WORKSPACES_WILDCARD_DOMAIN=\"*.remote-dev.dev\"\n```\n\n**Note:** This blog post will show the example domain `remote-dev.dev` for better understanding with a working example. The domain `remote-dev.dev` is maintained by the [Developer Evangelism team at GitLab](https://handbook.gitlab.com/handbook/marketing/developer-relations/developer-advocacy/projects/). There are no public demo environments available at the time of writing this blog post.\n\n### TLS certificates\nTLS certificates can be managed with different methods. 
To get started quickly, it is recommended to follow the [documentation steps](https://docs.gitlab.com/ee/user/workspace/#prerequisites) with Let's Encrypt and later consider production requirements with TLS certificates.\n\n```shell\ncertbot -d \"${GITLAB_WORKSPACES_PROXY_DOMAIN}\" \\\n  -m \"${EMAIL}\" \\\n  --config-dir ~/.certbot/config \\\n  --logs-dir ~/.certbot/logs \\\n  --work-dir ~/.certbot/work \\\n  --manual \\\n  --preferred-challenges dns certonly\n\n  certbot -d \"${GITLAB_WORKSPACES_WILDCARD_DOMAIN}\" \\\n  -m \"${EMAIL}\" \\\n  --config-dir ~/.certbot/config \\\n  --logs-dir ~/.certbot/logs \\\n  --work-dir ~/.certbot/work \\\n  --manual \\\n  --preferred-challenges dns certonly\n```\n\nThe Let's Encrypt CLI prompts you for the ACME DNS challenge. This requires setting TXT records for the challenge session immediately. Add the DNS records and specify a low TTL (time-to-live) of 300 seconds to update the records during the first steps.\n\n```\n_acme-challenge TXT \u003Cstringfromletsencryptacmechallenge>\n```\n\nYou can verify the DNS records using the `dig` CLI command.\n\n```shell\n$ dig _acme-challenge.remote-dev.dev txt\n...\n;; ANSWER SECTION:\n_acme-challenge.remote-dev.dev.\t246 IN\tTXT\t\"TlGRM9JGdXHGVklPWgytflxWDF82Sv04nF--Wl9JFvg\"\n_acme-challenge.remote-dev.dev.\t246 IN\tTXT\t\"CqG_54w6I0heWF3wLMAmUAitPcUMs9qAU9b8QhBWFj8\"\n```\n\nOnce the Let's Encrypt routine is complete, note the TLS certificate location.\n\n```\nSuccessfully received certificate.\nCertificate is saved at: /Users/mfriedrich/.certbot/config/live/remote-dev.dev/fullchain.pem\nKey is saved at:         /Users/mfriedrich/.certbot/config/live/remote-dev.dev/privkey.pem\nThis certificate expires on 2023-08-15.\nThese files will be updated when the certificate renews.\n\nSuccessfully received certificate.\nCertificate is saved at: /Users/mfriedrich/.certbot/config/live/remote-dev.dev-0001/fullchain.pem\nKey is saved at:         
/Users/mfriedrich/.certbot/config/live/remote-dev.dev-0001/privkey.pem\nThis certificate expires on 2023-08-15.\nThese files will be updated when the certificate renews.\n```\n\nExport the TLS certificate paths into environment variables for the following setup steps.\n\n```shell\nexport WORKSPACES_DOMAIN_CERT=\"${HOME}/.certbot/config/live/${GITLAB_WORKSPACES_PROXY_DOMAIN}/fullchain.pem\"\nexport WORKSPACES_DOMAIN_KEY=\"${HOME}/.certbot/config/live/${GITLAB_WORKSPACES_PROXY_DOMAIN}/privkey.pem\"\n\nexport WILDCARD_DOMAIN_CERT=\"${HOME}/.certbot/config/live/${GITLAB_WORKSPACES_PROXY_DOMAIN}-0001/fullchain.pem\"\nexport WILDCARD_DOMAIN_KEY=\"${HOME}/.certbot/config/live/${GITLAB_WORKSPACES_PROXY_DOMAIN}-0001/privkey.pem\"\n```\n\n**Note**: If you prefer to use your certificates, please copy the files into a safe location, and export the environment variables with the path details.\n\n## GitLab OAuth application\n_After preparing the requirements, continue with the components setup._\n\nCreate a [group-owned OAuth application](https://docs.gitlab.com/ee/integration/oauth_provider.html) for the remote development workspaces group. Creating a centrally managed app with a service account or group with limited access is recommended for production use.\n\nNavigate into the group `Settings > Applications` and specify the following values:\n\n1. Name: `Remote Development workspaces by \u003Cresponsible team> - \u003Cdomain>`. Add the reponsible team that is trusted in your organization. For debugging, add the domain. There might be multiple authorization groups, this helps the identification which workspace domain is used.\n2. Redirect URI: `https://\u003CGITLAB_WORKSPACES_PROXY_DOMAIN>/auth/callback`. Replace `GITLAB_WORKSPACES_PROXY_DOMAIN` with the domain string value.\n3. 
Set the scopes to `api, read_user, openid, profile` .\n\n![GitLab remote development workspaces, OAuth application in the group settings](https://about.gitlab.com/images/blogimages/infrastructure-cloud-development-environments/gitlab_remote_dev_workspaces_oauth_app_create.png){: .shadow}\n\nStore the OAuth application details in your password vault, and export them as shell environment variables for the next setup steps.\n\nCreate a configuration secret for the proxy as a signing key (`SIGNING_KEY`), and store it in a safe place (for example, use a secrets vault like 1Password to create and store the key).\n\n```\nexport CLIENT_ID=\"XXXXXXXXX\" # Look into password vault and set\nexport CLIENT_SECRET=\"XXXXXXXXXX\" # Look into password vault and set\nexport REDIRECT_URI=\"https://${GITLAB_WORKSPACES_PROXY_DOMAIN}/auth/callback\"\n\nexport GITLAB_URL=\"https://gitlab.com\" # Replace with your self-managed GitLab instance URL if not using GitLab.com SaaS\nexport SIGNING_KEY=\"a_random_key_consisting_of_letters_numbers_and_special_chars\" # Look into password vault and set\n```\n\n## Kubernetes cluster setup\nThe following sections describe how to set up a Kubernetes cluster in different cloud and on-premises environments and install an [ingress controller](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers/) for HTTP access. After completing the Kubernetes setup, you can continue with the workspaces proxy and agent setup steps.\n\n**Choose one method to create a Kubernetes cluster. 
Note: Use `amd64` as platform architecture [until multi-architecture support is available for running workspaces](https://gitlab.com/groups/gitlab-org/-/epics/10594).** Cloud environments with Arm support will not work yet, for example AWS EKS on Graviton EC2 instances.\n\nYou should have defined the following variables from the previous setup steps:\n\n```sh\nexport EMAIL=\"user@company.com\"\nexport GITLAB_WORKSPACES_PROXY_DOMAIN=\"remote-dev.dev\"\nexport GITLAB_WORKSPACES_WILDCARD_DOMAIN=\"*.remote-dev.dev\"\n\nexport WORKSPACES_DOMAIN_CERT=\"${HOME}/.certbot/config/live/${GITLAB_WORKSPACES_PROXY_DOMAIN}/fullchain.pem\"\nexport WORKSPACES_DOMAIN_KEY=\"${HOME}/.certbot/config/live/${GITLAB_WORKSPACES_PROXY_DOMAIN}/privkey.pem\"\n\nexport WILDCARD_DOMAIN_CERT=\"${HOME}/.certbot/config/live/${GITLAB_WORKSPACES_PROXY_DOMAIN}-0001/fullchain.pem\"\nexport WILDCARD_DOMAIN_KEY=\"${HOME}/.certbot/config/live/${GITLAB_WORKSPACES_PROXY_DOMAIN}-0001/privkey.pem\"\n\nexport CLIENT_ID=\"XXXXXXXXX\" # Look into password vault and set\nexport CLIENT_SECRET=\"XXXXXXXXXX\" # Look into password vault and set\nexport REDIRECT_URI=\"https://${GITLAB_WORKSPACES_PROXY_DOMAIN}/auth/callback\"\n\nexport GITLAB_URL=\"https://gitlab.com\" # Replace with your self-managed GitLab instance URL if not using GitLab.com SaaS\nexport SIGNING_KEY=\"XXXXXXXX\" # Look into password vault and set\n\n```\n\n### Set up infrastructure with Google Kubernetes Engine (GKE)\n\n[Install and configure the Google Cloud SDK and `gcloud` CLI](https://cloud.google.com/sdk/docs/install?hl=en), and install the `gke-gcloud-auth-plugin` plugin to authenticate against Google Cloud.\n\n```shell\nbrew install --cask google-cloud-sdk\n\ngcloud components install gke-gcloud-auth-plugin\n\ngcloud auth login\n```\n\nCreate a new GKE cluster using the `gcloud` command, or follow the steps in the Google Cloud Console.\n\n```shell\n\nexport GCLOUD_PROJECT=group-community\nexport 
GCLOUD_CLUSTER=de-remote-development-1\n\ngcloud config set project $GCLOUD_PROJECT\n\n# Create cluster (modify for your needs)\ngcloud container clusters create $GCLOUD_CLUSTER \\\n    --release-channel stable \\\n    --zone us-central1-c \\\n    --project $GCLOUD_PROJECT\n\n# Verify cluster\ngcloud container clusters list\n\nNAME                     LOCATION         MASTER_VERSION   MASTER_IP       MACHINE_TYPE  NODE_VERSION       NUM_NODES  STATUS\nde-remote-development-1  us-central1-c    1.26.3-gke.1000  34.136.33.199   e2-medium     1.26.3-gke.1000    3          RUNNING\n\ngcloud container clusters get-credentials $GCLOUD_CLUSTER --zone us-central1-c --project $GCLOUD_PROJECT\nFetching cluster endpoint and auth data.\nkubeconfig entry generated for de-remote-development-1.\n```\n\n1. The setup requires the [`Kubernetes Engine Admin` role in Google IAM](https://cloud.google.com/kubernetes-engine/docs/concepts/access-control?hl=en#recommendations) to create ClusterRoleBindings.\n2. Create a new Kubernetes cluster (do not use Autopilot).\n3. Ensure that [cluster autoscaling](https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-autoscaler?hl=en) is enabled in the GKE cluster.\n4. Verify that a [default Storage Class](https://cloud.google.com/kubernetes-engine/docs/concepts/persistent-volumes?hl=en#storageclasses) has been defined.\n5. Install an Ingress controller, for example [ingress-nginx](https://kubernetes.github.io/ingress-nginx/deploy/#gce-gke). 
Follow the documentation and run the following commands to install `ingress-nginx` into the Kubernetes cluster.\n\n```shell\nkubectl create clusterrolebinding cluster-admin-binding \\\n  --clusterrole cluster-admin \\\n  --user $(gcloud config get-value account)\n\nkubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.7.1/deploy/static/provider/cloud/deploy.yaml\n```\n\nPrint the external IP for the DNS records, and update wildcard DNS (`*.remote-dev.dev`) and hostname (`remote-dev.dev`).\n\n```shell\ngcloud container clusters list\n\nkubectl get services -A\n```\n\n### Set up infrastructure with Amazon Elastic Kubernetes Service (EKS)\nCreating an Amazon EKS cluster requires [cluster IAM roles](https://docs.aws.amazon.com/eks/latest/userguide/create-cluster.html). You can the [`eksctl` CLI for Amazon EKS](https://eksctl.io/), which automatically creates the roles. `eksctl` [requires the AWS IAM Authenticator for Kubernetes](https://github.com/weaveworks/eksctl/blob/main/README.md#prerequisite), which will get pulled with Homebrew automatically on macOS.\n\n```shell\nbrew install eksctl awscli aws-iam-authenticator\naws configure\n\neksctl create cluster --name remote-dev \\\n    --region us-west-2 \\\n    --node-type m5.xlarge \\\n    --nodes 3 \\\n    --nodes-min=1 \\\n    --nodes-max=4 \\\n    --version=1.26 \\\n    --asg-access\n```\n\nThe eksctl command uses the [`--asg-access`, `--nodes-min/max` parameters for auto-scaling](https://eksctl.io/usage/autoscaling/). The autoscaler requires [additional configuration steps](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md), alternatively [Karpenter is supported in Amazon EKS](https://karpenter.sh/docs/getting-started/getting-started-with-karpenter/). 
Review the [autoscaling documentation](https://docs.aws.amazon.com/eks/latest/userguide/autoscaling.html), and [default Storage Class `gp2`](https://docs.aws.amazon.com/eks/latest/userguide/storage-classes.html) fulfilling the requirements. The Kubernetes configuration is automatically updated locally.\n\nInstall the [Nginx Ingress controller for EKS](https://kubernetes.github.io/ingress-nginx/deploy/#aws). Follow the documentation and run the following command to install `ingress-nginx` into the Kubernetes cluster.\n\n```shell\nkubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.8.0/deploy/static/provider/aws/deploy.yaml\n```\n\nPrint the external IP for the DNS records, and update wildcard DNS (`*.remote-dev.dev`) and hostname (`remote-dev.dev`).\n\n```shell\neksctl get cluster --region us-west-2 --name remote-dev\n\nkubectl get services -A\n```\n\n### Set up infrastructure with Azure Managed Kubernetes Service (AKS)\nInstall [Azure CLI](https://learn.microsoft.com/en-us/azure/aks/learn/quick-kubernetes-deploy-cli).\n\n```shell\nbrew install azure-cli\n\naz login\n```\n\nReview the documentation for the [cluster autoscaler in AKS](https://learn.microsoft.com/en-us/azure/aks/cluster-autoscaler) and the [default Storage Class being `managed-csi`](https://learn.microsoft.com/en-us/azure/aks/concepts-storage#storage-classes), create a new resource group, and create a new Kubernetes cluster. 
Download the Kubernetes configuration to continue with the `kubectl` commands.\n\n```shell\naz group create --name remote-dev-rg --location eastus\n\naz aks create \\\n--resource-group remote-dev-rg \\\n--name remote-dev \\\n--node-count 1 \\\n--vm-set-type VirtualMachineScaleSets \\\n--load-balancer-sku standard \\\n--enable-cluster-autoscaler \\\n--min-count 1 \\\n--max-count 3\n\naz aks get-credentials --resource-group remote-dev-rg --name remote-dev\n```\n\nInstall the [Nginx ingress controller in AKS](https://learn.microsoft.com/en-us/azure/aks/ingress-basic?tabs=azure-cli#basic-configuration). Follow the documentation and run the following commands to install `ingress-nginx` into the Kubernetes cluster.\n\n```shell\nNAMESPACE=ingress-basic\n\nhelm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx\nhelm repo update\n\nhelm install ingress-nginx ingress-nginx/ingress-nginx \\\n  --create-namespace \\\n  --namespace $NAMESPACE \\\n  --set controller.service.annotations.\"service\\.beta\\.kubernetes\\.io/azure-load-balancer-health-probe-request-path\"=/healthz\n```\n\nPrint the external IP for the DNS records, and update wildcard DNS (`*.remote-dev.dev`) and hostname (`remote-dev.dev`).\n\n```shell\nkubectl get services --namespace ingress-basic -o wide -w ingress-nginx-controller\n\nkubectl get services -A\n```\n\n### Set up infrastructure with Civo Cloud Kubernetes\nInstall and configure the [Civo CLI](https://www.civo.com/docs/kubernetes/create-a-cluster#creating-a-cluster-using-civo-cli), and create a Kubernetes cluster using 2 nodes, 4 CPUs, 8 GB RAM.\n\n```shell\ncivo kubernetes create remote-dev -n 2 -s g4s.kube.large\n\ncivo kubernetes config remote-dev --save\nkubectl config use-context remote-dev\n```\n\nYou have full permissions on the cluster to create ClusterRoleBindings. 
The [default Storage Class](https://www.civo.com/docs/kubernetes/kubernetes-volumes#creating-a-persistent-volume-claim-pvc) is set to 'civo-volume'.\n\nInstall the [Nginx Ingress controller using Helm](https://kubernetes.github.io/ingress-nginx/deploy/#quick-start). Follow the documentation and run the following command to install `ingress-nginx` into the Kubernetes cluster.\n\n```shell\nhelm upgrade --install ingress-nginx ingress-nginx \\\n  --repo https://kubernetes.github.io/ingress-nginx \\\n  --namespace ingress-nginx --create-namespace\n\n```\n\nPrint the external IP for the DNS records, and update wildcard DNS (`*.remote-dev.dev`) and hostname (`remote-dev.dev`).\n\n```shell\ncivo kubernetes show remote-dev\n\nkubectl get services -A\n```\n\n### Set up infrastructure with self-managed Kubernetes\nThe process follows similar steps, requiring a user with permission to create `ClusterRoleBinding` resources. The [Nginx Ingress controller](https://kubernetes.github.io/ingress-nginx/deploy/#quick-start) is the fastest path forward. Once the cluster is ready, print the load balancer IP for the DNS records, and create/update A/AAAA record for wildcard DNS (`*.remote-dev.dev`) and hostname (`remote-dev.dev`) pointing to the load balancer IP.\n\n## Workspaces proxy installation into Kubernetes\n_After completing the Kubernetes cluster setup with one of your preferred providers, please continue with the next steps._\n\nAdd the Helm repository for the workspaces proxy (it is using the [Helm charts feature in the GitLab package registry](https://docs.gitlab.com/ee/user/packages/helm_repository/)).\n\n```shell\nhelm repo add gitlab-workspaces-proxy \\\n  https://gitlab.com/api/v4/projects/gitlab-org%2fremote-development%2fgitlab-workspaces-proxy/packages/helm/devel\n```\n\nInstall the gitlab-workspaces-proxy, and optionally [specify the most current chart version](https://gitlab.com/gitlab-org/remote-development/gitlab-workspaces-proxy/-/blob/main/helm/Chart.yaml). 
If you are using a different ingress controller than Nginx, you need to change the `ingress.className` key. Re-run the command when new TLS certificates need to be installed.\n\n```shell\nhelm repo update\n\nhelm upgrade --install gitlab-workspaces-proxy \\\n  gitlab-workspaces-proxy/gitlab-workspaces-proxy \\\n  --version 0.1.6 \\\n  --namespace=gitlab-workspaces \\\n  --create-namespace \\\n  --set=\"auth.client_id=${CLIENT_ID}\" \\\n  --set=\"auth.client_secret=${CLIENT_SECRET}\" \\\n  --set=\"auth.host=${GITLAB_URL}\" \\\n  --set=\"auth.redirect_uri=${REDIRECT_URI}\" \\\n  --set=\"auth.signing_key=${SIGNING_KEY}\" \\\n  --set=\"ingress.host.workspaceDomain=${GITLAB_WORKSPACES_PROXY_DOMAIN}\" \\\n  --set=\"ingress.host.wildcardDomain=${GITLAB_WORKSPACES_WILDCARD_DOMAIN}\" \\\n  --set=\"ingress.tls.workspaceDomainCert=$(cat ${WORKSPACES_DOMAIN_CERT})\" \\\n  --set=\"ingress.tls.workspaceDomainKey=$(cat ${WORKSPACES_DOMAIN_KEY})\" \\\n  --set=\"ingress.tls.wildcardDomainCert=$(cat ${WILDCARD_DOMAIN_CERT})\" \\\n  --set=\"ingress.tls.wildcardDomainKey=$(cat ${WILDCARD_DOMAIN_KEY})\" \\\n  --set=\"ingress.className=nginx\"\n```\n\nThe chart installs and configures the ingress automatically. You can verify the setup by getting the `Ingress` resource type:\n\n```shell\nkubectl get ingress -n gitlab-workspaces\n\nNAME                      CLASS   HOSTS                             ADDRESS   PORTS     AGE\ngitlab-workspaces-proxy   nginx   remote-dev.dev,*.remote-dev.dev             80, 443   9s\n```\n\n### Agent for Kubernetes installation\nCreate the agent configuration file in `.gitlab/agents/\u003Cagentname>/config.yaml`, add to git, and push it into the repository. The `remote_development` key specifies the `dns_zone`, which must be set to the workspaces domain. Additionally, the integration needs to be enabled. 
The `observability` key intentionally configures [debug logging](https://docs.gitlab.com/ee/user/clusters/agent/work_with_agent.html#debug-the-agent) for the first setup to troubleshoot faster. You can adjust the `logging` levels for production usage.\n\n```shell\nexport GL_AGENT_K8S=remote-dev-dev\n\n$ mkdir agent-kubernetes && cd agent-kubernetes\n$ mkdir -p .gitlab/agents/${GL_AGENT_K8S}/\n\n$ cat \u003C\u003CEOF >.gitlab/agents/${GL_AGENT_K8S}/config.yaml\nremote_development:\n    enabled: true\n    dns_zone: \"${GITLAB_WORKSPACES_PROXY_DOMAIN}\"\n\nobservability:\n  logging:\n    level: debug\n    grpc_level: warn\nEOF\n\n$ git add .gitlab/agents/${GL_AGENT_K8S}/config.yaml\n$ git commit -avm \"Add agent for Kubernetes configuration\"\n# adjust the URL to your GitLab server URL and project path\n$ git remote add origin https://gitlab.example.com/remote-dev-workspaces/agent-kubernetes.git\n# will create a private project when https/PAT is used\n$ git push\n```\n\nOpen the GitLab project in your browser, navigate into `Operate > Kubernetes Clusters`, and click the `Connect a new cluster (agent)` button. Select the agent from the configuration dropdown, and click `Register`. The form generates a ready-to-use Helm chart CLI command. Similar to the command below, replace `XXXXXXXXXXREPLACEME` with the actual token value.\n\n```shell\nhelm repo add gitlab https://charts.gitlab.io\nhelm repo update\nhelm upgrade --install remote-dev-dev gitlab/gitlab-agent \\\n    --namespace gitlab-agent-remote-dev-dev \\\n    --create-namespace \\\n    --set image.tag=v16.0.1 \\\n    --set config.token=XXXXXXXXXXREPLACEME \\\n    --set config.kasAddress=wss://kas.gitlab.com # Replace with your self-managed GitLab KAS instance URL if not using GitLab.com SaaS\n```\n\nRun the commands, and verify that the agent is connected in the `Operate > Kubernetes Clusters` overview. 
You can access the pod logs using the following command:\n\n```shell\n$ kubectl get ns\nNAME                          STATUS   AGE\ngitlab-agent-remote-dev-dev   Active   9d\ngitlab-workspaces             Active   22d\n...\n\n$ kubectl logs -f -l app.kubernetes.io/name=gitlab-agent -n gitlab-agent-$GL_AGENT_K8S\n```\n\n_Congrats! Your infrastructure setup for on-demand, cloud-based development environments is complete._\n\n## Workspaces creation\nAfter completing the infrastructure setup, you must verify that all components work together and users can create workspaces. You can fork or import the [`example-python-http-simple` project](https://gitlab.com/gitlab-de/use-cases/remote-development/example-python-http-simple) into your GitLab group with access to the GitLab agent for Kubernetes to try it immediately. The project provides a simple Python web app with Flask that provides different HTTP routes. Alternatively, start with a new project and create a `.devfile.yaml` with the [example configuration](https://docs.gitlab.com/ee/user/workspace/#example-configurations).\n\nOptional: Inspect the [`.devfile.yaml`](https://docs.gitlab.com/ee/user/workspace/#devfile) file to learn about the configuration format. We will look into the `image` key later.\n\n```yaml\nschemaVersion: 2.2.0\ncomponents:\n  - name: py\n    attributes:\n      gl/inject-editor: true\n    container:\n      # Use a custom image that supports arbitrary user IDs.\n      # NOTE: THIS IMAGE IS NOT ACTIVELY MAINTAINED. 
DEMO USE CASES ONLY, DO NOT USE IN PRODUCTION.\n      # Source: https://gitlab.com/gitlab-de/use-cases/remote-development/container-images/python-remote-dev-workspaces-user-id\n      image: registry.gitlab.com/gitlab-de/use-cases/remote-development/container-images/python-remote-dev-workspaces-user-id:latest\n      memoryRequest: 1024M\n      memoryLimit: 2048M\n      cpuRequest: 500m\n      cpuLimit: 1000m\n      endpoints:\n        - name: http-python\n          targetPort: 8080\n```\n\n### Create the first workspaces\nNavigate to the `Your Work > Workspaces` menu and create a new workspace. Search for the project name, select the agent for Kubernetes, and create the workspace.\n\n![GitLab remote development workspaces, Python example](https://about.gitlab.com/images/blogimages/infrastructure-cloud-development-environments/gitlab_remote_dev_workspaces_python.png){: .shadow}\n\nOpen two terminals to follow the workspaces proxy and agent logs in the Kubernetes cluster.\n\n```shell\n$ kubectl logs -f -l app.kubernetes.io/name=gitlab-workspaces-proxy -n gitlab-workspaces\n\n{\"level\":\"info\",\"ts\":1686331102.886607,\"caller\":\"server/server.go:74\",\"msg\":\"Starting proxy server...\"}\n{\"level\":\"info\",\"ts\":1686331133.146862,\"caller\":\"upstream/tracker.go:47\",\"msg\":\"New upstream added\",\"host\":\"8080-workspace-62029-5534214-2vxdxq.remote-dev.dev\",\"backend\":\"workspace-62029-5534214-2vxdxq.gl-rd-ns-62029-5534214-2vxdxq\",\"backend_port\":8080}\n2023/06/09 17:21:10 getHostnameFromState state=https://60001-workspace-62029-5534214-2vxdxq.remote-dev.dev/folder=/projects/demo-python-http-simple\n```\n\n```shell\n$ kubectl logs -f -l app.kubernetes.io/name=gitlab-agent -n gitlab-agent-$GL_AGENT_K8S\n\n{\"level\":\"debug\",\"time\":\"2023-06-09T18:36:19.839Z\",\"msg\":\"Applied event\",\"mod_name\":\"remote_development\",\"apply_event\":\"WaitEvent{ GroupName: \\\"wait-0\\\", Status: \\\"Pending\\\", Identifier: 
\\\"gl-rd-ns-62029-5534214-k66cjy_workspace-62029-5534214-k66cjy-gl-workspace-data__PersistentVolumeClaim\\\" }\",\"agent_id\":62029}\n{\"level\":\"debug\",\"time\":\"2023-06-09T18:36:19.866Z\",\"msg\":\"Received update event\",\"mod_name\":\"remote_development\",\"workspace_namespace\":\"gl-rd-ns-62029-5534214-k66cjy\",\"workspace_name\":\"workspace-62029-5534214-k66cjy\",\"agent_id\":62029}\n{\"level\":\"debug\",\"time\":\"2023-06-09T18:36:43.627Z\",\"msg\":\"Applied event\",\"mod_name\":\"remote_development\",\"apply_event\":\"WaitEvent{ GroupName: \\\"wait-0\\\", Status: \\\"Successful\\\", Identifier: \\\"gl-rd-ns-62029-5534214-k66cjy_workspace-62029-5534214-k66cjy_apps_Deployment\\\" }\",\"agent_id\":62029}\n```\n\nWait until the workspace is provisioned successfully, and click to open the HTTP URL, example format `https://60001-workspace-62029-5534214-2vxdxq.remote-dev.dev/?folder=%2Fprojects%2Fexample-python-http-simple`. The GitLab OAuth application will ask you for authorization.\n\n![GitLab OAuth provider app, example with the Developer Evangelism demo environment](https://about.gitlab.com/images/blogimages/infrastructure-cloud-development-environments/gitlab_remote_dev_workspaces_oauth_app.png){: .shadow}\n\nYou can select the Web IDE menu, open a new terminal (`cmd shift p` and search for `terminal create`). More shortcuts and Web IDE usage are documented [here](https://docs.gitlab.com/ee/user/project/web_ide/).\n\n![GitLab remote development workspaces, Python example, create terminal](https://about.gitlab.com/images/blogimages/infrastructure-cloud-development-environments/gitlab_remote_dev_workspaces_python_web_ide_create_terminal.png){: .shadow}\n\nUsing the Python example project, try to run the `hello.py` file with the Python interpreter after changing the terminal to `bash` to access auto-completion and shell history. 
Type `pyth`, press tab, type `hel`, press tab, enter.\n\n```shell\n$ bash\n\n$ python hello.py\n```\n\nThe command will fail because the Python requirements still need to be installed. Let us fix that by running the following command:\n\n```shell\n$ pip install -r requirements.txt\n```\n\n![GitLab remote development workspaces, Python example, install requirements in the terminal](https://about.gitlab.com/images/blogimages/infrastructure-cloud-development-environments/gitlab_remote_dev_workspaces_python_web_ide_terminal_install_pip.png){: .shadow}\n\n**Note**: This example is intentionally kept simple, and does not use best practices with `pyenv` for managing Python environments. We will explore development environment templates in future blog posts.\n\nRun the Python application `hello.py` again to start the web server on port 8080.\n\n```shell\n$ python hello.py\n```\n\nYou can access the exposed port by modifying the URL from the default port at the beginning of the URL to the exposed port `8080`. The `?folder` URL parameter can also be removed.\n\n```diff\n-https://60001-workspace-62029-5534214-kbtcmq.remote-dev.dev/?folder=/projects/example-python-http-simple\n+https://8080-workspace-62029-5534214-kbtcmq.remote-dev.dev/\n```\n\nThe URL is not publicly available and requires access through the GitLab OAuth session.\n\n![GitLab remote development workspaces, Python example, run webserver, access HTTP](https://about.gitlab.com/images/blogimages/infrastructure-cloud-development-environments/gitlab_remote_dev_workspaces_python_web_ide_terminal_run_webserver_access_http.png){: .shadow}\n\nModifying the workspace requires custom container images supporting to run with [arbitrary user IDs](https://docs.gitlab.com/ee/user/workspace/#arbitrary-user-ids). The example project uses a custom image which allows to install Python dependencies and create build artifacts. It also allows to use the bash terminal shown above. 
Learn more about custom image creation in the next section.\n\n### Custom workspace container images\nCustom container images require support for [arbitrary user IDs](https://docs.gitlab.com/ee/user/workspace/#arbitrary-user-ids). You can build custom container images with [GitLab CI/CD](/solutions/continuous-integration/) and use the [GitLab container registry](https://docs.gitlab.com/ee/user/packages/container_registry/) to distribute the container images on the DevSecOps platform.\n\nWorkspaces run with arbitrary user IDs in the Kubernetes cluster containers and manage resource access with Linux group permissions. Existing container images may need to be changed, and imported as base image for new container images. The [following example](https://gitlab.com/gitlab-de/use-cases/remote-development/container-images/python-remote-dev-workspaces-user-id/-/blob/main/Dockerfile) uses the `python:3.11-slim-bullseye` image from Docker Hub as a base container image in the `FROM` key. The next steps create and set a home directory in `/home/gitlab-workspaces`, and manage user and group access to specified directories. Additionally, you can install more convenience tools and configurations into the image, for example the `git` package.\n\n[`Dockerfile`](https://gitlab.com/gitlab-de/use-cases/remote-development/container-images/python-remote-dev-workspaces-user-id/-/blob/main/Dockerfile)\n```\n# Example demo for a Python-based container image.\n# NOTE: THIS IMAGE IS NOT ACTIVELY MAINTAINED. DEMO USE CASES ONLY, DO NOT USE IN PRODUCTION.\n\nFROM python:3.11-slim-bullseye\n\n# User id for build time. Runtime will be an arbitrary random ID.\nRUN useradd -l -u 33333 -G sudo -md /home/gitlab-workspaces -s /bin/bash -p gitlab-workspaces gitlab-workspaces\n\nENV HOME=/home/gitlab-workspaces\n\nWORKDIR $HOME\n\nRUN mkdir -p /home/gitlab-workspaces && chgrp -R 0 /home && chmod -R g=u /etc/passwd /etc/group /home\n\n# TODO: Add more convenience tools into the user home directory, i.e. 
enable color prompt for the terminal, install pyenv to manage Python environments, etc\nRUN apt update && \\\n    apt -y --no-install-recommends install git procps findutils htop vim curl wget && \\\n    rm -rf /var/lib/apt/lists/*\n\nUSER gitlab-workspaces\n```\n\n **As an exercise**, [fork the project](https://gitlab.com/gitlab-de/use-cases/remote-development/container-images/python-remote-dev-workspaces-user-id) and modify the package installation step in the `Dockerfile` file to install the `dnsutils` package on the Debian based image to get access to the `dig` command.\n\n[`Dockerfile`](https://gitlab.com/gitlab-de/use-cases/remote-development/container-images/python-remote-dev-workspaces-user-id/-/blob/main/Dockerfile)\n```diff\n-RUN apt update && \\\n-    apt -y --no-install-recommends install git procps findutils htop vim curl wget && \\\n-    rm -rf /var/lib/apt/lists/*\n+RUN apt update && \\\n+    apt -y --no-install-recommends install git procps findutils htop vim curl wget dnsutils && \\\n+    rm -rf /var/lib/apt/lists/*\n```\n\n[Build the container image](https://docs.gitlab.com/ee/ci/docker/using_docker_build.html) with your preferred CI/CD workflow. On GitLab.com SaaS, you can include the `Docker.gitlab-ci.yml` template which takes care of building the image.\n\n```yaml\ninclude:\n    - template: Docker.gitlab-ci.yml\n```\n\nWhen building the container images manually, use Linux and `amd64` as platform architecture [until multi-architecture support is available for running workspaces](https://gitlab.com/groups/gitlab-org/-/epics/10594). Also, review the [optimizing images guide in the documentation](https://docs.gitlab.com/ee/ci/pipelines/pipeline_efficiency.html#optimize-docker-images) when creating custom container images to optimize size and build times.\n\nNavigate into `Deploy > Container Registry` in the GitLab UI and copy the image URL from the tagged image. 
Open the `.devfile.yaml` file in the forked GitLab project `example-python-http-simple`, and change the `image` path to the newly built image URL.\n\n[`.devfile.yaml`](https://gitlab.com/gitlab-de/use-cases/remote-development/example-python-http-simple/-/blob/main/.devfile.yaml)\n```diff\n-      image: registry.gitlab.com/gitlab-de/use-cases/remote-development/container-images/python-remote-dev-workspaces-user-id:latest\n+      image: registry.gitlab.example.com/remote-dev-workspaces/python-remote-dev-workspaces-user-id:latest\n```\n\nNavigate into `Your Work > Workspaces` and create a new workspace for the project, and try to execute the `dig` command to query the IPv6 address of GitLab.com (or any other internal domain).\n\n```shell\n$ dig +short gitlab.com AAAA\n```\n\nThe custom container image project is located [here](https://gitlab.com/gitlab-de/use-cases/remote-development/container-images/python-remote-dev-workspaces-user-id/).\n\n## Tips\nThis blog post's setup steps with environment variables are easy to follow. 
For production usage, use automation to manage your environment with Terraform, Ansible, etc.\n\n- Terraform: [Provision a GKE Cluster (Google Cloud)](https://developer.hashicorp.com/terraform/tutorials/kubernetes/gke), [Provision an EKS Cluster (AWS)](https://developer.hashicorp.com/terraform/tutorials/kubernetes/eks), [Provision an AKS Cluster (Azure)](https://developer.hashicorp.com/terraform/tutorials/kubernetes/aks), [Deploy Applications with the Helm Provider](https://developer.hashicorp.com/terraform/tutorials/kubernetes/helm-provider)\n- Ansible: [google.cloud.gcp_container_cluster module](https://docs.ansible.com/ansible/latest/collections/google/cloud/gcp_container_cluster_module.html), [community.aws.eks_cluster module](https://docs.ansible.com/ansible/latest/collections/community/aws/eks_cluster_module.html), [azure.azcollection.azure_rm_aks module](https://docs.ansible.com/ansible/latest/collections/azure/azcollection/azure_rm_aks_module.html), [kubernetes.core collection](https://docs.ansible.com/ansible/latest/collections/kubernetes/core/index.html#plugin-index)\n\n### Certificate management\nThe workspaces domain requires a valid TLS certificate. The examples above used certbot with Let's Encrypt, requiring a certificate renewal after three months. Depending on your corporate requirements, you may need to create TLS certificates signed by the corporate CA identity and manage the certificates. Alternatively, you can look into solutions like [cert-manager for Kubernetes](https://cert-manager.io/docs/getting-started/) that will help renew certificates automatically.\n\nDo not forget to add TLS certificate validity monitoring to avoid unforeseen errors. 
The [blackbox exporter for Prometheus](https://github.com/prometheus/blackbox_exporter) can help with monitoring TLS certificate expiry and send alerts.\n\n### Troubleshooting\nHere are a few tips for troubleshooting connections and inspecting the cluster resources.\n\n#### Verify the connections\nTry to connect to the workspaces domain to see whether the Kubernetes Ingress controller responds to HTTP requests.\n\n```shell\n$ curl -vL ${GITLAB_WORKSPACES_PROXY_DOMAIN}\n```\n\nInspect the logs of the proxy deployment to follow connection requests. Since the proxy requires an authorization token sent via the OAuth app, an HTTP 400 error is expected for unauthenticated curl requests.\n\n```shell\n$ kubectl logs -f -l app.kubernetes.io/name=gitlab-workspaces-proxy -n gitlab-workspaces\n```\n\nCheck if the TLS certificate is valid. You can also use `sslscan` and other tools.\n\n```shell\n$ openssl s_client -connect ${GITLAB_WORKSPACES_PROXY_DOMAIN}:443\n\n$ sslscan ${GITLAB_WORKSPACES_PROXY_DOMAIN}\n```\n\n[Debug the agent for Kubernetes](https://docs.gitlab.com/ee/user/clusters/agent/work_with_agent.html#debug-the-agent) and inspect the pod logs.\n\n```shell\n$ kubectl get ns\n\n$ kubectl logs -f -l app.kubernetes.io/name=gitlab-agent -n gitlab-agent-\u003CNAMESPACENAME>\n```\n\n#### Workspaces cannot be created even if the agent is connected\nWhen the workspaces deployment is spinning and nothing happens, try restarting the workspaces proxy and agent for Kubernetes. This is a known problem and tracked [in this issue](https://gitlab.com/gitlab-org/gitlab/-/issues/414399#note_1426652421).\n\n```shell\n$ kubectl rollout restart deployment -n gitlab-workspaces\n\n$ kubectl rollout restart deployment -n gitlab-agent-$GL_AGENT_K8S\n```\n\nIf the agent for Kubernetes remains unresponsive, consider a complete reinstall. 
First, navigate into the GitLab UI into `Operate > Kubernetes Clusters` and [delete the agent](https://docs.gitlab.com/ee/user/clusters/agent/work_with_agent.html#remove-an-agent-through-the-gitlab-ui). Next, use the following commands to delete the Helm release from the cluster, and run the installation command generated from the UI again.\n\n```shell\nkubectl get ns\nhelm list -A\n\nexport RELEASENAME=xxx\nexport NAMESPACENAME=xxx\nexport TOKEN=XXXXXXXXXXREPLACEME\nhelm uninstall ${RELEASENAME} -n gitlab-agent-${NAMESPACENAME}\n\nhelm repo add gitlab https://charts.gitlab.io\nhelm repo update\n\nhelm upgrade --install ${RELEASENAME} gitlab/gitlab-agent \\\n    --namespace gitlab-agent-${NAMESPACENAME} \\\n    --create-namespace \\\n    --set image.tag=v16.1.2 \\\n    --set config.token=${TOKEN} \\\n    --set config.kasAddress=wss://kas.gitlab.com # Replace with your self-managed GitLab KAS instance URL if not using GitLab.com SaaS\n```\n\nExample: `helm uninstall remote-dev-dev -n gitlab-agent-remote-dev-dev`\n\n#### Cannot modify workspace using custom images\nIf you cannot modify the workspace, open a new terminal and check the user id and their groups.\n\n```shell\n$ id\n```\n\nInspect the `.devfile.yaml` file in the project and extract the `image` attribute to test the used container image. You can use container CLI, for example `docker` that runs the container with a different user ID. Note: You can use any user ID to test the behavior.\n\nTip: Use grep and cut commands to extract the image attribute URL from the `.devfile.yaml`.\n\n```shell\n$ cat .devfile.yaml | grep image: | cut -f2- -d ':'\n```\n\nRun the following command to execute the `id` command in the container, and print the user information.\n\n```\n$ docker run -u 1234 -ti registry.gitlab.com/path/to/project/image:tagname id\n```\n\nTry to modify the workspace by running the command `echo 'Hi' >> ~/example.md`. 
This can fail with a permission error.\n\n```shell\n$ docker run -u 1234 -ti registry.gitlab.com/path/to/project/image:tagname echo 'Hi' >> ~/example.md\n```\n\nIf the above command failed, the Linux user group does not have enough permissions to modify the file. You can view the permissions using the `ls` command.\n\n```shell\n$ docker run -u 1234 -ti registry.gitlab.com/path/to/project/image:tagname ls -lart ~/\n```\n\n### Contribute\nThe [remote development developer documentation](https://gitlab.com/gitlab-org/remote-development/gitlab-remote-development-docs) provides insights into the [architecture blueprint](https://docs.gitlab.com/ee/architecture/blueprints/remote_development/) and how to set up a local development environment to [start contributing](/community/contribute/). In the future, we will be able to use remote development workspaces to develop remote development workspaces.\n\n## Share your feedback\nIn this blog post, you have learned how to manage the infrastructure for remote development workspaces, create your first workspace, and more tips on custom workspace images and troubleshooting. Using the same development environment across organizations and communities, developers can focus on writing code and get fast preview feedback (i.e., by running a web server that can be accessed externally in the remote workspace). Providing the same reproducible environment also helps opensource contributors to reproduce bugs and provide feedback most efficiently. They can use the same best practices as upstream maintainers.\n\nDevelopers and DevOps engineers will be using the Web IDE in workspaces. 
Later, being able to [connect their desktop client to workspaces](https://gitlab.com/groups/gitlab-org/-/epics/10478), they can take advantage of even more efficiency with the [most comprehensive AI-powered DevSecOps platform](/gitlab-duo/): Code suggestions and more AI-powered workflows are just one fingertip away.\n\nWhat will your teams build with remote development workspaces? Please share your experiences in the [feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/410031), blog about your setup, and join our [community forum](https://forum.gitlab.com/) for more discussions.\n\nCover image by [Nick Karvounis](https://unsplash.com/@nickkarvounis) on [Unsplash](https://unsplash.com/photos/SmIM3m8f3Pw)",[542,1634,563,1429,9],{"slug":2464,"featured":6,"template":734},"set-up-infrastructure-for-cloud-development-environments","content:en-us:blog:set-up-infrastructure-for-cloud-development-environments.yml","Set Up Infrastructure For Cloud Development Environments","en-us/blog/set-up-infrastructure-for-cloud-development-environments.yml","en-us/blog/set-up-infrastructure-for-cloud-development-environments",{"_path":2470,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2471,"content":2477,"config":2482,"_id":2484,"_type":14,"title":2485,"_source":16,"_file":2486,"_stem":2487,"_extension":19},"/en-us/blog/shifting-from-on-prem-to-cloud",{"title":2472,"description":2473,"ogTitle":2472,"ogDescription":2473,"noIndex":6,"ogImage":2474,"ogUrl":2475,"ogSiteName":720,"ogType":721,"canonicalUrls":2475,"schema":2476},"Shifting from on-prem to cloud","The challenges of being on-prem and what to consider when shifting to public cloud.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679664/Blog/Hero%20Images/on-prem-to-cloud.jpg","https://about.gitlab.com/blog/shifting-from-on-prem-to-cloud","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Shifting from on-prem to 
cloud\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2020-01-09\"\n      }",{"title":2472,"description":2473,"authors":2478,"heroImage":2474,"date":2479,"body":2480,"category":815,"tags":2481},[772],"2020-01-09","\n\nCloud computing and cloud adoption are perennial topics when talking about scalability and growth, but many enterprises still operate a significant portion of their workloads in legacy environments. With so much information on the reduced infrastructure costs and the elasticity of public cloud, why do organizations still do all the work themselves?\n\nIn this discussion with Sr. Product Marketing Manager [William Chia](/company/team/#williamchia), we talk about the challenges traditional IT teams face, the barriers to cloud adoption, and strategies to consider for making the leap.\n\n\n## Why organizations use traditional IT\n\nThe reasons that an organization may want to manage their own infrastructure are myriad and geared toward unique needs and/or limitations within their organization.\n\n\n### Regulatory/Compliance\n\nIn highly-regulated industries such as banking and healthcare, or even government entities, there may be compliance concerns or risks that prevent them from utilizing public cloud. More control means more oversight and more accountability. \"If I need to keep patient data private to comply with HIPAA, for example, if I keep 100% control of my systems and infrastructure I can ensure I comply. If I outsource to cloud services then I have to take different steps to ensure I'm not leaking PII,\" says William. 
Even though the big cloud providers – namely GCP, AWS, and Azure – have compliance built-in, some organizations may still be hesitant to have them assume those risks.\n\n\n### Protecting sensitive data\n\nIT leaders surveyed in a [Cloud Security Alliance report](https://www.skyhighnetworks.com/cloud-security-blog/5-surprising-truths-from-the-cloud-security-alliances-latest-survey/) expressed that, while they are confident in cloud security capabilities, there are things that can go wrong beyond their control: Inside threats, compromised accounts, and misconfigured security settings up the stack that can all lead to security breaches. According to nearly 68% of the IT leaders surveyed, the ability to enforce corporate security policies is the [number one barrier to moving applications to the cloud](https://www.skyhighnetworks.com/cloud-security-blog/5-surprising-truths-from-the-cloud-security-alliances-latest-survey/). \"The top-level concern basically comes down to control and data privacy,\" says William.\n\n\n### Better costs\n\nFor companies operating at a small scale, cloud computing’s pay-per-use model will almost always be cheaper than managing your own data centers, but for larger-scale organizations that isn’t always the case. \"There's a breaking point… If you run on-prem, it actually could be cheaper than your cloud bill at huge scale, but you’re running so much software you’re basically running your own private cloud at that point,\" says William. For a long-term strategy, organizations have to weigh their CapEx vs OpEx costs, and while [CapEx involves a large upfront expense in whole systems and servers](https://www.10thmagnitude.com/opex-vs-capex-the-real-cloud-computing-cost-advantage/), and the continued cost of maintenance, the computing volume could make this a worthwhile investment.\n\nAnother reason that companies may run their own infrastructure is because that’s how they’ve always done it. 
While not a very scientific answer, it’s the reality for many companies, especially those that grew before the age of cloud.\n\n\"Once upon a time, if you were a large enterprise and you had to run a lot of software, you had no choice but to manage it all yourself. And so now you have all these servers, you have all of these staff, and you have all of these business processes. You have a great deal of both physical and logical infrastructure and if you want to move to the cloud you have to change all of it. That comes at a very high cost,\" says William.\n\nIn the past, moving small amounts of data was relatively easy. When we start talking about exabytes of data, rather than terabytes of data, the process of migration becomes herculean. According to Jean-Luc Valente, the VP for product management in the cloud platforms and solutions group at Cisco, egressing that kind of data to a public cloud could [cost as much as $30 million dollars](https://www.zdnet.com/article/multicloud-is-here-but-challenges-remain/).\n\n\n\n\n\n## The challenges of on-prem infrastructure\n\nWhile organizations may have specific reasons for running on-premises infrastructure, that decision comes with distinct challenges.\n\n\n### Range of expertise\n\n\"Above a certain level, you are managing all of your infrastructure and you're managing all of your uptime. That's a lot of expertise. You need to become as good at operating a cloud infrastructure as Amazon or Google is, which is why those public clouds are so radically popular. In order to get there requires a lot of resources,\" says William.\n\n\n### Managing software and hardware\n\nIn order to manage uptime and security, operations teams need to perform software maintenance like upgrades and patches in addition to managing physical assets like servers, racks, power supplies, and network switches. 
At a certain point, an organization is devoting a lot of resources to just keeping things running rather than innovating, so all of these resources are being invested in undifferentiated engineering.\n\n\n### Undifferentiated engineering\n\nIf it is not a core competency for your organization, then it’s undifferentiated engineering. \"If you don't need to manage that on-premises data center or servers for a specific reason, then the cloud is more attractive because that's a high cost,\" says William. \"You're spending a lot of engineering dollars on things that are not differentiating you in the marketplace.\"\n\n\n## Strategies for shifting to cloud\n\n\n### The benefits of \"lift-and-shift\"\n\nIn previous posts, we’ve talked about [legacy and monolithic applications acting as a barrier](/blog/cloud-adoption-roadmap/) for cloud adoption, but there can be some benefit to lifting and shifting those applications to the cloud. While you may not be able to take full advantage of microservices and cloud native application development, shifting those applications to the cloud does provide the benefit of reducing your operational overhead. This can provide an opportunity to learn new competencies.\n\n\"There's a separate set of competencies that you need to acquire to start running in the cloud. You don’t need to learn everything all at once. If you take a monolithic, on-premises app, simply lift-and-shift it into a VM in the cloud, that allows you to start to understand things like cloud billing, and gain some of the competencies of a cloud deployment pattern,\" says William.\n\n\n### Hybrid cloud\n\nMany organizations have opted to use both private and public cloud for a hybrid cloud infrastructure. These hybrid clouds blend the control and security of a private cloud, but also the flexibility and agility of public cloud. During periods of high usage, organizations can leverage public cloud’s pay-per-use model and save themselves from needing additional infrastructure. 
Organizations can use their private cloud for sensitive data and public cloud for developing and testing new applications. Having a hybrid cloud environment allows teams to manage their on-premises infrastructure and take advantage of public cloud scale.\n\n\nWhile cloud adoption is widespread, many organizations have unique reasons to stay or migrate to an on-premises infrastructure. Cost, control, and risk mitigation continue to be the main drivers of on-prem vs. cloud decisions. Public cloud’s pay-per-use model may not be more cost effective for organizations that operate at higher scale, but a hybrid cloud model can offer organizations the flexibility to use public cloud during periods of high usage without having to invest in additional infrastructure. Both on-prem and cloud require unique and extensive operational competencies, so teams will need leaders that are skilled in these areas when making the switch.\n\n\nCover image by [Matt Howard](https://unsplash.com/@thematthoward?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/journey?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[563,9],{"slug":2483,"featured":6,"template":734},"shifting-from-on-prem-to-cloud","content:en-us:blog:shifting-from-on-prem-to-cloud.yml","Shifting From On Prem To Cloud","en-us/blog/shifting-from-on-prem-to-cloud.yml","en-us/blog/shifting-from-on-prem-to-cloud",{"_path":2489,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2490,"content":2495,"config":2500,"_id":2502,"_type":14,"title":2503,"_source":16,"_file":2504,"_stem":2505,"_extension":19},"/en-us/blog/simplify-your-cloud-account-management-for-kubernetes-access",{"title":2491,"description":2492,"ogTitle":2491,"ogDescription":2492,"noIndex":6,"ogImage":1420,"ogUrl":2493,"ogSiteName":720,"ogType":721,"canonicalUrls":2493,"schema":2494},"Simplify your cloud account management for Kubernetes access","In this tutorial, learn how 
to use the GitLab agent for Kubernetes and its user impersonation features for secure cluster access.\n\n","https://about.gitlab.com/blog/simplify-your-cloud-account-management-for-kubernetes-access","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Simplify your cloud account management for Kubernetes access\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Viktor Nagy\"}],\n        \"datePublished\": \"2024-03-19\",\n      }",{"title":2491,"description":2492,"authors":2496,"heroImage":1420,"date":2497,"body":2498,"category":979,"tags":2499},[1845],"2024-03-19","We hear you: Managing cloud accounts is risky, tedious, and time-consuming,\nbut also a must-have in many situations. You might run your Kubernetes\nclusters with one of the hyperclouds, and your engineers need to access at\nleast the non-production cluster to troubleshoot issues quickly and\nefficiently. Sometimes, you also need to give special, temporary access to\nengineers on a production cluster.\n\n\nYou have also told us that access requests might not come very often, but\nwhen they do, they are urgent, and given the high security requirements\naround the process, they can take close to a week to fulfill. \n\n\nBy giving access to your cloud infrastructure, you automatically expose\nyourself to risks. As a result, it's a best practice to restrict access only\nto the resources the given user must have access to. However, cloud identity\nand access management (IAM) is complex by nature. \n\n\nIf you are using Kubernetes and you need to give access specifically to your\nclusters only, GitLab can help. Your user will be able to identify with your\ncluster, so you can configure the Kubernetes role-based access controls\n(RBAC) to restrict their access within the cluster. 
With GitLab, and\nspecifically the GitLab agent for Kubernetes, you can start at the last step\nand focus only on the RBAC aspect.\n\n\n## What is the GitLab agent for Kubernetes?\n\n\nThe GitLab agent for Kubernetes is a set of GitLab components that allows a\npermanent, bi-directional streaming channel between your GitLab instance and\nyour Kubernetes cluster (one agent per cluster). Once the agent connection\nis configured, you can share it across projects and groups within your\nGitLab instance, allowing a single agent to serve all the access needs of a\ncluster.\n\n\nCurrently, the agent has several features to simplify your Kubernetes\nmanagement tasks:\n\n\n* [Integrates with GitLab\nCI/CD](https://docs.gitlab.com/ee/user/clusters/agent/ci_cd_workflow.html)\nfor push-based deployments or regular cluster management jobs. The\nintegration exposes a Kubernetes context per available agent in the Runner\nenvironment, and any tool that can take a context as an input (e.g. kubectl\nor helm CLI) can reach your cluster from the CI/CD jobs.\n\n* Integrates with the GitLab GUI, specifically the environment pages. 
Users\ncan configure [an environment to show the Kubernetes\nresources](https://docs.gitlab.com/ee/ci/environments/kubernetes_dashboard.html)\navailable in a specific namespace, and even set up a Flux resource to track\nthe reconciliation of your applications.\n\n* Enables users to use the GitLab-managed channel to [connect to the cluster\nfrom their local\nlaptop](https://docs.gitlab.com/ee/user/clusters/agent/user_access.html#access-a-cluster-with-the-kubernetes-api),\nwithout giving them cloud-specific Kubernetes access tokens.\n\n* Supports [Flux GitRepository\nreconciliations](https://docs.gitlab.com/ee/user/clusters/agent/gitops.html#immediate-git-repository-reconciliation)\nby triggering a reconciliation automatically on new commits in repositories\nthe agent can access.\n\n* [Runs operational container\nscans](https://docs.gitlab.com/ee/user/clusters/agent/vulnerabilities.html)\nand shows the reports in the GitLab UI.\n\n* Enables you to enrich the [remote\ndevelopment](https://docs.gitlab.com/ee/user/project/remote_development/)\noffering with [workspaces](https://docs.gitlab.com/ee/user/workspace/).\n\n\n> Try simplifying your cloud account management for Kubernetes access today\nwith [a free trial of GitLab Ultimate](https://gitlab.com/-/trials/new).\n\n\n## The agent and access management\n\n\nThe GitLab agent for Kubernetes, which is available for GitLab Ultimate and\nPremium, impersonates various GitLab-specific users when it acts on behalf\nof GitLab in the cluster.\n\n\n* For the GitLab CI/CD integration, the agent impersonates the CI job as the\nuser, and enriches the user with group specific metadata that describe the\nproject and the group.\n\n\n* For the environment and local connections, the agent impersonates the\nGitLab user using the connection, and similarly to the CI/CD integration,\nthe impersonated Kubernetes user is enriched with group specific metadata,\nlike roles in configured groups.\n\n\nAs this article is about using the agent 
instead of cloud accounts for\ncluster access, let’s focus on the environment and local connections setup.\n\n\n## An example setup\n\n\nTo offer a realistic setup, let’s assume that in our GitLab instance we have\nthe following groups and projects:\n\n\n* `/app-dev-group/team-a/service-1`\n\n* `/app-dev-group/team-a/service-2`\n\n* `/app-dev-group/team-b/service-3`\n\n* `/platform-group/clusters-project`\n\n\nIn the above setup, the agents are registered against the `clusters-project`\nproject and, in addition to other code, the project contains the agent\nconfiguration files:\n\n\n* `.gitlab/agents/dev-cluster/config.yaml`\n\n* `.gitlab/agents/prod-cluster/config.yaml`\n\n\nThe `dev-cluster` and `prod-cluster` directory names are actually the agent\nnames as well, and registered agents and related events can be seen within\nthe projects “Operations/Kubernetes clusters” menu item. The agent offers\nsome minimal features by default, without a configuration file. To benefit\nfrom the user access features and to share the agent connection across\nprojects and groups, a configuration file is required.\n\n\nLet’s assume that we want to configure the agents in the following way:\n\n\n* For the development cluster connection:\n\n    * Everyone with at least developer role in team-a should be able to read-write their team specific namespace `team-a` only.\n    * Everyone with group owner role in team-a should have namespace admin rights on the `team-a` namespace only.\n    * Members of `team-b` should not be able to access the cluster.\n\n* For the production cluster connection:\n\n    * Everyone with at least developer role in team-a should be able to read-only their team specific namespace `team-a` only.\n    * Members of `team-b` should not be able to access the cluster.\n\nFor the development cluster, the above setup requires an agent configuration\nfile in `.gitlab/agents/dev-cluster/config.yaml` as follows:\n\n\n```yaml\n\nuser_access:\n  access_as:\n    user: {}\n  
groups:\n    - id: app-dev-group/team-a # group_id=1\n    - id: app-dev-group/team-b # group_id=2\n```\n\n\nIn this code snippet we added the group ID of the specific groups in a\ncomment. We will need these IDs in the following Kubernetes RBAC\ndefinitions:\n\n\n```yaml\n\napiVersion: rbac.authorization.k8s.io/v1\n\nkind: RoleBinding\n\nmetadata:\n  name: team-a-dev-can-edit\n  namespace: team-a\nroleRef:\n  name: edit\n  kind: ClusterRole\n  apiGroup: rbac.authorization.k8s.io\nsubjects:\n  - name: gitlab:group_role:1:developer\n    kind: Group\n```\n\n\nand...\n\n\n```yaml\n\napiVersion: rbac.authorization.k8s.io/v1\n\nkind: RoleBinding\n\nmetadata:\n  name: team-a-owner-can-admin\n  namespace: team-a\nroleRef:\n  name: admin\n  kind: ClusterRole\n  apiGroup: rbac.authorization.k8s.io\nsubjects:\n  - name: gitlab:group_role:1:owner\n    kind: Group\n```    \n\n\nThe above two code snippets can be applied to the cluster with the GitLab\nFlux integration or manually via `kubectl`. They describe role bindings for\nthe `team-a` group members. 
It’s important to note that only the groups and\nprojects from the agent configuration file can be targeted as RBAC groups.\nTherefore, the following RBAC will not work as the impersonated user\nresources don’t know about the referenced projects:\n\n\n```yaml\n\napiVersion: rbac.authorization.k8s.io/v1\n\nkind: RoleBinding\n\nmetadata:\n  name: team-a-dev-can-edit\n  namespace: team-a\nroleRef:\n  name: edit\n  kind: ClusterRole\n  apiGroup: rbac.authorization.k8s.io\nsubjects:\n  - name: gitlab:project_role:3:developer # app-dev-group/team-a/service-1 project ID is 3\n    kind: Group\n```\n\n\nFor the production cluster we need the same agent configuration under\n`.gitlab/agents/prod-cluster/config.yaml` and the following RBAC\ndefinitions:\n\n\n```yaml\n\napiVersion: rbac.authorization.k8s.io/v1\n\nkind: RoleBinding\n\nmetadata:\n  name: team-a-dev-can-read\n  namespace: team-a\nroleRef:\n  name: view\n  kind: ClusterRole\n  apiGroup: rbac.authorization.k8s.io\nsubjects:\n  - name: gitlab:group_role:1:developer\n    kind: Group\n```\n\n\nThese configurations allow project owners to set up the environment pages so\nmembers of `team-a` will be able to see the status of their cluster\nworkloads in real-time and they should be able to access the cluster from\ntheir local computers using their favorite Kubernetes tools.\n\n\n## Explaining the magic\n\n\nIn the previous section, you learned how to set up role bindings for group\nmembers with specific roles. In this section, let's dive into the\nimpersonated user and their attributes.\n\n\nWhile Kubernetes does not have a User or Group resource, its authentication\nand authorization scheme pretends to have it. Users have a username, can\nbelong to groups, and can have other extra attributes.\n\n\nThe impersonated GitLab user carries the `gitlab:username:\u003Cusername>` in the\ncluster. 
For example, if our imaginary user Béla has the GitLab username\n`bela`, then in the cluster the impersonated user will be called\n`gitlab:username:bela`. This allows targeting of a specific user in the\ncluster.\n\n\nEvery impersonated user belongs to the `gitlab:user` group. Moreover, for\nevery project and group listed in the agent configuration, we check the\ncurrent user’s role and add it as a group. This is more easily understood\nthrough an example, so let’s modify a little bit the agent configuration we\nused above.\n\n\n```yaml\n\nuser_access:\n  access_as:\n    user: {}\n  projects:\n    - id: platform-group/clusters-project # project_id=1\n  groups:\n    - id: app-dev-group/team-a # group_id=1\n    - id: app-dev-group/team-b # group_id=2\n```\n\n\nFor the sake of example, let’s assume the contrived setup that our user Béla\nis a maintainer in the `platform-group/clusters-project` project, is a\ndeveloper in `app-dev-group/team-a` group, and an owner of the\n`app-dev-group/team-a/service-1` project. In this case, the impersonated\nKubernetes user `gitlab:username:bela` will belong to the following groups:\n\n\n* `gitlab:user`\n\n* `gitlab:project_role:1:developer`\n\n* `gitlab:project_role:1:maintainer`\n\n* `gitlab:group_role:1:developer`\n\n\nWhat happens is that we check Béla’s role in every project and group listed\nin the agent configuration, and set up all the roles that Béla has there. As\nBéla is a maintainer in `platform-group/clusters-project` (project ID 1), we\nadd him to both the `gitlab:project_role:1:developer` and\n`gitlab:project_role:1:maintainer` groups. Note as well, that we did not add\nany groups for the `app-dev-group/team-a/service-1` project, only its parent\ngroup that appears in the agent configuration.\n\n\n## Simplifying cluster management\n\n\nSetting up the agent and configuring the cluster as presented above is\neverything you need to model the presented access requirements in the\ncluster. 
You don’t have to manage cloud accounts or add in-cluster account\nmanagement tools like Dex. The agent for Kubernetes and its user\nimpersonation features can simplify your infrastructure management work.\n\n\nWhen new people join your company, once they become members of the `team-a`\nthey immediately get access to the clusters as configured above. Similarly,\nas someone leaves your company, you just have to remove them from the group\nand their access will be disabled. As we mentioned, the agent supports local\naccess to the clusters, too. As that local access runs through the\nGitLab-side agent component, it will be disabled as well when users are\nremoved from the `team-a` group.\n\n\nSetting up the agent takes around two-to-five minutes per cluster. Setting\nup the required RBAC might take another five minutes. In 10 minutes, users\ncan get controlled access to a cluster, saving days of work and decreasing\nthe risks associated with cloud accounts.\n\n\n## Get started today\n\n\nIf you want to try this approach and allow access to your colleagues to some\nof your clusters without managing cloud accounts, the following\ndocumentation pages should help you to get started:\n\n\n- On self-managed GitLab instances, you might need to [configure the\nGitLab-side component (called\nKAS)](https://docs.gitlab.com/ee/administration/clusters/kas.html) of the\nagent for Kubernetes first.\n\n\n- You can learn more about [all the Kubernetes management features\nhere](https://docs.gitlab.com/ee/user/clusters/agent/), or you can\nimmediately dive in by [installing an\nagent](https://docs.gitlab.com/ee/user/clusters/agent/install/), and\n[granting users access to\nKubernetes](https://docs.gitlab.com/ee/user/clusters/agent/user_access.html).\n\n\n- You’ll likely want to [configure a Kubernetes\ndashboard](https://docs.gitlab.com/ee/ci/environments/kubernetes_dashboard.html)\nfor your deployed application.\n\n\n> Try simplifying your cloud account management for Kubernetes 
access today\nwith [a free trial of GitLab Ultimate](https://gitlab.com/-/trials/new).\n",[9,979,859,1146],{"slug":2501,"featured":91,"template":734},"simplify-your-cloud-account-management-for-kubernetes-access","content:en-us:blog:simplify-your-cloud-account-management-for-kubernetes-access.yml","Simplify Your Cloud Account Management For Kubernetes Access","en-us/blog/simplify-your-cloud-account-management-for-kubernetes-access.yml","en-us/blog/simplify-your-cloud-account-management-for-kubernetes-access",{"_path":2507,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2508,"content":2514,"config":2519,"_id":2521,"_type":14,"title":2522,"_source":16,"_file":2523,"_stem":2524,"_extension":19},"/en-us/blog/sourcegraph-code-intelligence-integration-for-gitlab",{"title":2509,"description":2510,"ogTitle":2509,"ogDescription":2510,"noIndex":6,"ogImage":2511,"ogUrl":2512,"ogSiteName":720,"ogType":721,"canonicalUrls":2512,"schema":2513},"Native code intelligence is coming to GitLab","We're enhancing code review with Sourcegraph – no extra plugins required.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749673090/Blog/Hero%20Images/random_code.jpg","https://about.gitlab.com/blog/sourcegraph-code-intelligence-integration-for-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Native code intelligence is coming to GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mayank Tahilramani\"}],\n        \"datePublished\": \"2019-11-12\",\n      }",{"title":2509,"description":2510,"authors":2515,"heroImage":2511,"date":2516,"body":2517,"category":729,"tags":2518},[1630],"2019-11-12","\nAlmost a year ago, our CEO [Sid Sijbrandij](/company/team/#sytses) opened an issue proposing [GitLab integrate with Sourcegraph to provide advanced code navigation and cross-referencing functionality for source code we host](https://gitlab.com/gitlab-org/gitlab/issues/20642). 
We knew this feature would be a big improvement to the Developer UX in our product, particularly for efficient code review. We also knew [Sourcegraph](https://about.sourcegraph.com/) has an open-core product with one of the best-in-class code navigation capabilities. It only made sense to have a tighter integration between the two products.\n\n## How we built this\n\nSo, our generous friends at Sourcegraph got to work. A [browser extension supporting GitLab](https://docs.sourcegraph.com/integration/gitlab) was already available, but Sourcegraph collaborated with our engineering and product management teams and added the integration directly to the GitLab codebase – powered by GitLab.com and Sourcegraph.com. The integration gives users a fully browser-based developer platform, with no extra plugins required.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/LjVxkt4_sEA\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\nGitLab CEO and co-founder Sid Sijbrandij and Sourcegraph CEO and co-founder Quinn Slack explain the new integration.\n{: .note.text-center}\n\nFor now, get a sneak preview of how our integration with Sourcegraph works by watching a [quick screencast tutorial](https://vimeo.com/372226334/de668e24fa).\n\nThe process of building the integration between Sourcegraph and GitLab is a great example of our [transparency](https://handbook.gitlab.com/handbook/values/#transparency) and [collaboration](https://handbook.gitlab.com/handbook/values/#collaboration) values at work.\n\n## Collaboration in the open\n\n[Sourcegraph’s contribution to GitLab](https://gitlab.com/gitlab-org/gitlab/merge_requests/16556) is significant for developer productivity. For example, their merge request (MR) adds native support for features like ‘go-to-definition’ and ‘find references’ within a hover tooltip. 
Users can engage the tooltip UI in code views, file views, merge requests, and code diffs. Developers can stay in context during code reviews when they need to investigate a function implementation by simply hovering over the name of the function to navigate efficiently. Within the tooltip, users can see the definition of the function, navigate to the definition, or show other references in the code where the function is being used. In addition to making code reviews higher quality and more efficient, developers will have an easier time investigating complex implementations when reading the source of their favorite library. With Sourcegraph, we’re enabling developers with a richer UX by gathering more information about the code they are reading.\n\nSee for yourself by reading the discussions on the [MR](https://gitlab.com/gitlab-org/gitlab/merge_requests/16556) and viewing changes made to the code. As always, we’re collaborating in the open and encourage the community to provide constructive feedback on our project. Drop a line in the blog comments to share your thoughts.\n\nFor a more detailed overview of the UX of functionality and features, check out [this blog post](https://about.sourcegraph.com/blog/gitlab-integrates-sourcegraph-code-navigation-and-code-intelligence) by Christina Forney, product manager at Sourcegraph.\n\n## What does this mean for our users?\n\nGitlab’s integration with Sourcegraph will be available in our [12.5 release](/upcoming-releases/) on November 22, 2019. We aim to provide code intelligence and code navigation functionality in this integration which was historically provided by the Sourcegraph’s browser extension. 
Now that we built this integration the browser extension is no longer needed to provide this functionality.\n\nIn the spirit of [iteration](https://handbook.gitlab.com/handbook/values/#iteration) our rollout strategy on GitLab.com is to **first dogfood** the functionality within our [*gitlab-org*](https://gitlab.com/gitlab-com/) group, which is where GitLab stores [source code for GitLab.com](/solutions/source-code-management/) and GitLab Enterprise. Over time, we aim to roll out Sourcegraph capabilities across code views within projects to all *public projects* on GitLab.com. Users will still require the browser extension configured to a private instance of Sourcegraph for **private projects** on GitLab.com.\n\nIf you’re self-managing your GitLab EE deployment and would like to enable Sourcegraph code intelligence, you must have a private Sourcegraph instance running as an external service. This is required because Sourcegraph.com does not index any private code for privacy and security reasons. We will have formal documentation on how to get started with GitLab EE and Sourcegraph soon, but if you’re super curious, [you can see our work in progress here](https://gitlab.com/gitlab-org/gitlab/blob/ps-sourcegraph-playground/doc/integration/sourcegraph.md) within the MR branch.\n\n## What’s next?\n\nStay tuned for our 12.5 release announcement on November 22 and updates containing details around our integration with Sourcegraph. Give us a [thumbs up](https://gitlab.com/gitlab-org/gitlab/merge_requests/16556) if you like what we’re working on. If you’re new to Sourcegraph and/or GitLab, [sign up here](https://gitlab.com/users/sign_up) and install [the browser extension](https://docs.sourcegraph.com/integration/gitlab#browser-extension) to test out these features right away. 
[Here is a link to a file in one of our public projects where you can test out these features](https://gitlab.com/gitlab-org/gitlab-runner/blob/master/executors/ssh/executor_ssh.go).\n\n[Cover photo](https://unsplash.com/photos/qjnAnF0jIGk) by [Markus Spiske](https://unsplash.com/@markusspiske) on Unsplash.\n{: .note}\n",[109,9,232],{"slug":2520,"featured":6,"template":734},"sourcegraph-code-intelligence-integration-for-gitlab","content:en-us:blog:sourcegraph-code-intelligence-integration-for-gitlab.yml","Sourcegraph Code Intelligence Integration For Gitlab","en-us/blog/sourcegraph-code-intelligence-integration-for-gitlab.yml","en-us/blog/sourcegraph-code-intelligence-integration-for-gitlab",{"_path":2526,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2527,"content":2533,"config":2538,"_id":2540,"_type":14,"title":2541,"_source":16,"_file":2542,"_stem":2543,"_extension":19},"/en-us/blog/the-continued-support-of-fluxcd-at-gitlab",{"title":2528,"description":2529,"ogTitle":2528,"ogDescription":2529,"noIndex":6,"ogImage":2530,"ogUrl":2531,"ogSiteName":720,"ogType":721,"canonicalUrls":2531,"schema":2532},"The continued support of FluxCD at GitLab","GitLab is committed to working with other partners to make sure that Flux remains a stable, reliable, and mature Cloud Native Computing Foundation project.\n","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664472/Blog/Hero%20Images/gitlabflatlogomap.png","https://about.gitlab.com/blog/the-continued-support-of-fluxcd-at-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The continued support of FluxCD at GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Viktor Nagy\"}],\n        \"datePublished\": \"2024-03-05\",\n      }",{"title":2528,"description":2529,"authors":2534,"heroImage":2530,"date":2535,"body":2536,"category":1250,"tags":2537},[1845],"2024-03-05","Last month, Weaveworks CEO Alexis 
Richardson [announced publicly](https://www.linkedin.com/posts/richardsonalexis_hi-everyone-i-am-very-sad-to-announce-activity-7160295096825860096-ZS67) the company, which is the main sponsor of FluxCD, is closing its doors and shutting down its commercial operations.\n\nGitLab made a strategic decision in early 2023 [to integrate FluxCD with its agent for Kubernetes offering](https://about.gitlab.com/blog/why-did-we-choose-to-integrate-fluxcd-with-gitlab/) as the recommended GitOps solution. While we were sad to see the news about Weaveworks, the company, it in no way changes our commitment to FluxCD, the project, and its ability to drive efficiencies for our customers. FluxCD is a mature, enterprise-ready GitOps solution with a modern, modular architecture and clean codebase that lends itself for integration and requires minimal maintenance.\n\nIn the past month, we have had discussions with a number of companies that built their tooling around FluxCD, and together we are certain that FluxCD is a solution we want to continue to support and rely upon. We looked into switching to alternatives, but decided against other options. We are confident in the future of Flux. Flux is a mature Cloud Native Computing Foundation (CNCF) project with a large and dedicated user base. We believe that our continued support and integration with Flux serves our users the best.\n\nUnfortunately, such an organizational change affects the status of the Flux maintainers. At GitLab, we are committed to open source. When we decided to integrate with Flux, we knew that, sooner or later, we would like to have FluxCD maintainers within GitLab. Given the recent changes, we are committed even more to playing an active role in the Flux community and we want to support FluxCD for enterprise customers.\n\n> “GitLab is a proven platform for software delivery, and I am really pleased to see their leadership standing up to help and support Flux. 
As the inventors of GitOps and FluxCD, I know that Weaveworks people and all our customers will want to see this. For my part, I’m more confident in the future of Flux than ever, and I’m happy to see GitLab being one of the companies working on enterprise Flux support.” - Alexis Richardson, CEO, Weaveworks\n\nAs these are turbulent times in the Flux community, we are working closely with other partners to make sure that Flux remains a stable, reliable, and mature CNCF project.\n\n> Read more about [our FluxCD integration](https://about.gitlab.com/blog/why-did-we-choose-to-integrate-fluxcd-with-gitlab/).\n",[9,1250,731,1475],{"slug":2539,"featured":91,"template":734},"the-continued-support-of-fluxcd-at-gitlab","content:en-us:blog:the-continued-support-of-fluxcd-at-gitlab.yml","The Continued Support Of Fluxcd At Gitlab","en-us/blog/the-continued-support-of-fluxcd-at-gitlab.yml","en-us/blog/the-continued-support-of-fluxcd-at-gitlab",{"_path":2545,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2546,"content":2552,"config":2557,"_id":2559,"_type":14,"title":2560,"_source":16,"_file":2561,"_stem":2562,"_extension":19},"/en-us/blog/the-kubecon-summary-from-a-product-perspective",{"title":2547,"description":2548,"ogTitle":2547,"ogDescription":2548,"noIndex":6,"ogImage":2549,"ogUrl":2550,"ogSiteName":720,"ogType":721,"canonicalUrls":2550,"schema":2551},"How what we learned at KubeCon EU 2022 will impact our product roadmaps","Platform integrations and secrets management are among our product team's primary takeaways. 
Find out why.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097776/Blog/Hero%20Images/Blog/Hero%20Images/2_2.png_1750097776369.png","https://about.gitlab.com/blog/the-kubecon-summary-from-a-product-perspective","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How what we learned at KubeCon EU 2022 will impact our product roadmaps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Viktor Nagy\"}],\n        \"datePublished\": \"2022-05-31\",\n      }",{"title":2547,"description":2548,"authors":2553,"heroImage":2549,"date":2554,"body":2555,"category":576,"tags":2556},[1845],"2022-05-31","\nAfter two years of only virtual KubeCon events, the GitLab product team was excited to participate in and meet colleagues, partners, and more from our industry at KubeCon EU 2022, held in Valencia, Spain. We were present with four product leaders, a software developer, and a UX researcher. This post summarizes our primary takeaways from the conference, an experience that will affect our roadmaps.\n\nWe will discuss the following topics:\n\n- Internal platforms and GitOps\n- Secrets management\n- Infrastructure integrations\n- WebAssembly a.k.a. WASM\n\nThere were 32 topic types and several 0-day events at KubeCon. Many talks focused on a few tools. Many Cloud Native Computing Foundation ([CNCF](https://www.cncf.io/)) projects had their community meetings during these days. Some talks were given IRL, and others were broadcast virtually with live Q&A. There were a variety of topics and approaches. There were many talks about the various aspects of cluster management, too. However, we left this topic out on purpose because at GitLab we want to focus on the software developers and provide one DevOps platform to support their work. Cluster management is one step away from this focus. 
Still, we noticed some remarkable patterns as highlighted by the four elements of our list.\n\n> You’re invited! Join us on June 23rd for the [GitLab 15 launch event](https://page.gitlab.com/fifteen) with DevOps guru Gene Kim and several GitLab leaders. They’ll show you what they see for the future of DevOps and The One DevOps Platform.\n\n## Internal platforms and GitOps\n\nCompanies want their developers to focus on their core business. They create internal platforms to hide the complexity of Day 0-2 operations from their software engineers and still allow the \"shift left\" movement of DevOps. These platforms often involve the welding of several tools.\n\nMany talks presented how the given team or company approached their platform problem and what tools they used, and one could often feel the 18-month sweat of a whole platform team trying to come up with a solution.\n\nThese platforms use either a push- or pull-based model for deployments. No single approach is emerging due to legacy applications and different requirements. While there is a definition of GitOps provided by the [OpenGitOps](https://opengitops.dev/) initiative, several presenters offered their own definitions, including of pull-based deployments.\n\nWe fielded a large-scale survey related to secrets at KubeCon, and learned that users would like help with the [Pipeline Authoring](/direction/verify/pipeline_composition/) workflow.\n\nBesides the wiring of the tools, the industry is still looking for a unified approach to multi-tenancy (there might not be one), and sometimes integrating security processes seems overly challenging.\n\n### How does this affect our roadmap?\n\nThere is a lot of potential in building a platform used as the starting point for internal platforms. Imagine a \"tool\" that shortens the time required to create an internal platform to days or weeks instead of a whole year. 
This is the GitLab vision of The One DevOps platform.\n\nAs a result, we don't plan any changes in our direction. We will continue investing in the recently started [Deployment direction](/direction/delivery/) to provide all the building blocks for a platform in a single tool and are already actively looking for integrated experiences across our offering.\n\nWe’re working on a CI/CD Component Catalog that includes CI templates. This will [support the Pipeline Authoring workflow](https://gitlab.com/groups/gitlab-org/-/epics/7462).\n\n## Secrets management\n\nOne of the things that often came up in our discussions is secrets management. We fielded a large-scale survey related to secrets at KubeCon, and attendees were glad that we’re thinking about this topic. Security is part of the DevOps discussion, and secrets management is a serious issue, especially in a cloud-native aspect.\n\n- Jenkins, GitHub and GitLab were all mentioned during the secret management discussions.\n- Users would like to offload the secrets management responsibility to another product. In many cases, their security requirements are strict, so they don't want/can't handle secrets by themselves.\n- Hashicorp Vault is a preferred tool (primarily in large enterprise companies working in finance or government) to manage and handle secrets. 
At the same time, most companies would like to avoid operating one more application in their stack.\n- Open ID Connect [OIDC](https://docs.gitlab.com/ee/integration/openid_connect_provider.html) with the JSON web token (JWT) is an essential direction for us.\n\n### How does this affect our roadmap?\n\nWe should invest more in secrets management since this is a pain our customers would like us to solve, and it's becoming a nonstarter feature for many organizations.\n\nWe want to advance in three main vectors:\n\n- Improve our existing secrets management solution - although we don't have a clear solution, we should improve our current variables capabilities to include additional features that could help users leverage variables for secrets. So it would be a \"good enough\" feature they can use. We are actively working toward this direction by removing some of the limitations we have around [variables and masking](https://gitlab.com/groups/gitlab-org/-/epics/1994).\n- Improve our existing [Hashicorp Vault integration](https://docs.gitlab.com/ee/ci/examples/authenticating-with-hashicorp-vault/) using the JWT token, allowing us to integrate with additional vendors (AWS, AZURE, GCP). Like the previous point, we are moving toward this direction by supporting OIDC and [adding audience claims to our JWT token](https://gitlab.com/groups/gitlab-org/-/epics/7335).\n- We need to develop [a clear strategy for a built-in secrets management solution](/direction/govern/pipeline_security/secrets_management/#next-9-12-monhts). In order to provide our users/customers with choice, GitLab wants to use Hashicorp Vault for secrets management handling. We believe that our approach should be not to build the logic ourselves but to leverage an open source, [cloud native](/topics/cloud-native/) project that we could build into GitLab.\n\n## Infrastructure integrations\n\nInfrastructure integrations came in several flavors during the talks. 
Some are about cluster management, that is not our focus in this blog. Several presentations show that internal platforms need a strong infrastructure aspect, too. When a new project/microservice is started, it might require a new namespace in the cluster with associated RBAC and policies, optionally storage, a source code management repo with automation, and the appropriate permissions. Deployments might create ephemeral environments or could modify the underlying environment within predefined constraints.\n\nThe top tools mentioned in this area are:\n\n- Terraform\n- Crossplane\n- Pulumi\n\n### How does this affect our roadmap?\n\nGitLab already has [great integrations for Terraform](https://docs.gitlab.com/ee/user/infrastructure/iac/), and the other tools are on our radar, too.\n\nWe are open to integrations but cannot currently prioritize the other integrations on our own. We hope that the community will be interested in contributing to benefit everyone.\n\nBuilding Docker containers might not be necessary to get easy-to-manage container binaries. WASM runtimes become available for Kubernetes, and many programming languages can natively compile to WASM. WASM can provide a secure runtime environment without Docker and might be able to simplify the toolchain developers need to learn.\n\nWe don't plan to add direct WASM support to GitLab yet. The generic package registry can hold WASM modules while their deployment is up to the user.\n\nAt the same time, we see a lot of potential in simple runtime environments built around WASM. While GitLab is not in the business of offering runtime services, we will be actively monitoring the market. 
We might look into more WASM integrations as we see more demand and tools and services maturing in this space.\n\n## GitLab feedback\n\nIt's great to work on a product where the overall sentiment is positive, both from customers that intensely rely on it and from attendees that have to use other tools but would love to use GitLab or just started to play with it recently.\n\nWe received the following notable mentions as feedback:\n\n- Stability and reliability improved over the last several months.\n- Users love our documentation (primarily around CI) - they mentioned it's easy to use and get started with.\n- Given the size of GitLab and the number of our users, we received feedback about long-outstanding issues. We were happy to respond that we are addressing at least some of them shortly.\n- Several customers had asked if we got some resources for migrating from Jenkins to GitLab.\n- A few customers mentioned that they had to move away from GitLab mainly because of an upper-level decision despite favouring GitLab.\n\n## Conclusions\n\n![The GitLab team](https://about.gitlab.com/images/blogimages/kubecon-gitlab-team.jpg)\n\nWe enjoyed all the talks and were delighted to meet and speak with our users and customers. Thanks to all of you, we could \"feel the pulse\" on how we are doing and validate our direction.\n\nWe hope that this blog will guide those who could not [attend KubeCon](https://about.gitlab.com/events/) and serve as a summary for those who did attend. All the recordings will likely be available on YouTube from Jun 6, 2022.\n\nLet us know in the comments if you think we missed some important direction.\n\n_This blog post and linked pages contain information related to upcoming products, features, and functionality.\nIt is important to note that the information presented is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. 
As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab Inc._\n",[859,1021,1062,558,9,563],{"slug":2558,"featured":6,"template":734},"the-kubecon-summary-from-a-product-perspective","content:en-us:blog:the-kubecon-summary-from-a-product-perspective.yml","The Kubecon Summary From A Product Perspective","en-us/blog/the-kubecon-summary-from-a-product-perspective.yml","en-us/blog/the-kubecon-summary-from-a-product-perspective",{"_path":2564,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2565,"content":2570,"config":2576,"_id":2578,"_type":14,"title":2579,"_source":16,"_file":2580,"_stem":2581,"_extension":19},"/en-us/blog/tutorial-install-vs-code-on-a-cloud-provider-vm-and-set-up-remote-access",{"title":2566,"description":2567,"ogTitle":2566,"ogDescription":2567,"noIndex":6,"ogImage":1420,"ogUrl":2568,"ogSiteName":720,"ogType":721,"canonicalUrls":2568,"schema":2569},"Tutorial: Install VS Code on a cloud provider VM and set up remote access","Learn how to automate the installation of VS Code on a VM running on a cloud provider and how to access it from your local laptop.","https://about.gitlab.com/blog/tutorial-install-vs-code-on-a-cloud-provider-vm-and-set-up-remote-access","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Tutorial: Install VS Code on a cloud provider VM and set up remote access\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2024-05-06\",\n      }",{"title":2566,"description":2567,"authors":2571,"heroImage":1420,"date":2573,"body":2574,"category":752,"tags":2575},[2572],"Cesar Saavedra","2024-05-06","DevSecOps teams can sometimes find they need to run an instance of Visual\nStudio Code (VS Code) remotely for team members to share when 
they don't\nhave enough local resources. However, installing, running, and using VS Code\non a remote virtual machine (VM) via a cloud provider can be a complex\nprocess full of pitfalls and false starts. This tutorial covers how to\nautomate the installation of VS Code on a VM running on a cloud provider.\n\n\nThis approach involves two separate GitLab projects, each with its own\npipeline. The first one uses Terraform to instantiate a virtual machine in\nGCP running Linux Debian. The second one installs VS Code on the newly\ninstantiated VM. Lastly, we provide a procedure on how to set up your local\nMac laptop to connect and use the VS Code instance installed on the remote\nVM.\n\n\n## Create a Debian Linux distribution VM on GCP\n\n\nHere are the steps to create a Debian Linux distribution VM on GCP.\n\n\n### Prerequisites\n\n\n1. A GCP account. If you don't have one, please [create\none](https://cloud.google.com/free?hl=en).\n\n2. A GitLab account on [gitlab.com](https://gitlab.com/users/sign_in)\n\n\n**Note:** This installation uses:\n\n\n- Debian 5.10.205-2 (2023-12-31) x86_64 GNU/Linux, a.k.a Debian 11\n\n\n### Create a service account and download its key\n\n\nBefore you create the first GitLab project, you need to create a service\naccount in GCP and then generate and download a key. You will need this key\nso that your GitLab pipelines can communicate to GCP and the GitLab API.\n\n\n1. To authenticate GCP with GitLab, sign in to your GCP account and create a\n[GCP service\naccount](https://cloud.google.com/docs/authentication#service-accounts) with\nthe following roles:\n\n- `Compute Network Admin`\n\n- `Compute Admin`\n\n- `Service Account User`\n\n- `Service Account Admin`\n\n- `Security Admin`\n\n\n3. Download the JSON file with the service account key you created in the\nprevious step.\n\n4. 
On your computer, encode the JSON file to `base64` (replace\n`/path/to/sa-key.json` to the path where your key is located):\n\n   ```shell\n   base64 -i /path/to/sa-key.json | tr -d \\\\n\n   ```\n\n**NOTE:** Save the output of this command. You will use it later as the\nvalue for the `BASE64_GOOGLE_CREDENTIALS` environment variable.\n\n\n### Configure your GitLab project\n\n\nNext, you need to create and configure the first GitLab project.\n\n\n1. Create a group in your GitLab workspace and name it `gcpvmlinuxvscode`.\n\n\n1. Inside your newly created group, clone the following project:\n\n   ```shell\n   git@gitlab.com:tech-marketing/sandbox/gcpvmlinuxvscode/gcpvmlnxsetup.git\n   ```\n\n1. Drill into your newly cloned project, `gcpvmlnxsetup`, and set up the\nfollowing CI/CD variables to configure it:\n   1. On the left sidebar, select **Settings > CI/CD**.\n   1. Expand **Variables**.\n   1. Set the variable `BASE64_GOOGLE_CREDENTIALS` to the `base64` encoded JSON file you created in the previous section.\n   1. Set the variable `TF_VAR_gcp_project` to your GCP `project` ID.\n   1. Set the variable `TF_VAR_gcp_region` to your GCP `region` ID, e.g. us-east1, which is also its default value.\n   1. Set the variable `TF_VAR_gcp_zone` to your GCP `zone` ID, e.g. us-east1-d, which is also its default value.\n   1. Set the variable `TF_VAR_machine_type` to the GCP `machine type` ID, e.g. e2-standard-2, which is also its default value.\n   1. Set the variable `TF_VAR_gcp_vmname` to the GCP `vm name` you want to give the VM, e.g. my-test-vm, which is also its default value.\n\n**Note:** We have followed a minimalist approach to set up this VM. 
If you\nwould like to customize the VM further, please refer to the [Google\nTerraform\nprovider](https://registry.terraform.io/providers/hashicorp/google/latest/docs/guides/provider_reference)\nand the [Google Compute Instance Terraform\nprovider](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/compute_instance)\ndocumentation for additional resource options.\n\n\n### Provision your VM\n\n\nAfter configuring your project, manually trigger the provisioning of your VM\nas follows:\n\n\n1. On the left sidebar, go to **Build > Pipelines**.\n\n1. Next to **Play** (**{play}**), select the dropdown list icon\n(**{chevron-lg-down}**).\n\n1. Select **Deploy** to manually trigger the deployment job.\n\n\nWhen the pipeline finishes successfully, you can see your new VM on GCP:\n\n\n- Check it on your [GCP console's VM instances\nlist](https://console.cloud.google.com/compute/instances).\n\n\n### Remove the VM\n\n\n**Important note:** Only run the cleanup job when you no longer need the GCP\nVM and/or the VS Code that you installed in it.\n\n\nA manual cleanup job is included in your pipeline by default. To remove all\ncreated resources:\n\n\n1. On the left sidebar, select **Build > Pipelines** and select the most\nrecent pipeline.\n\n1. For the `destroy` job, select **Play** (**{play}**).\n\n\n## Install and set up VS Code on a GCP VM\n\n\nPerform the steps in this section only after you have successfully finished\nthe previous sections above. In this section, you will create the second\nGitLab project that will install VS Code and its dependencies on the running\nVM on GCP.\n\n\n### Prerequisites\n\n\n1. A provisioned GCP VM. 
We covered this in the previous sections.\n\n\n**Note:** This installation uses:\n\n\n- VS Code Version 1.85.2\n\n\n### Configure your project\n\n\n**Note:** Since you will be using the `ssh` command multiple times on your\nlaptop, we strongly suggest that you make a backup copy of your laptop local\ndirectory `$HOME/.ssh` before continuing.\n\n\nNext, you need to create and configure the second GitLab project.\n\n\n1. Head over to your GitLab group `gcpvmlinuxvscode`, which you created at\nthe beginning of this post.\n\n\n1. Inside group, `gcpvmlinuxvscode`, clone the following project:\n\n   ```shell\n   git@gitlab.com:tech-marketing/sandbox/gcpvmlinuxvscode/vscvmsetup.git\n   ```\n\n1. Drill into your newly cloned project, `vscvmsetup` and set up the\nfollowing CI/CD variables to configure it:\n   1. On the left sidebar, select **Settings > CI/CD**.\n   1. Expand **Variables**.\n   1. Set the variable `BASE64_GOOGLE_CREDENTIALS` to the `base64` encoded JSON file you created in project `gcpvmlnxvsc`. You can copy this value from the variable with the same name in project `gcpvmlnxvsc`.\n   1. Set the variable `gcp_project` to your GCP `project` ID.\n   1. Set the variable `gcp_vmname` to your GCP `region` ID, e.g. us-east1.\n   1. Set the variable `gcp_zone` to your GCP `zone` ID, e.g. us-east1-d.\n   1. Set the variable `vm_pwd` to the password that you will use to ssh to the VM.\n   1. Set the variable `gcp_vm_username` to the first portion (before the \"@\" sign) of the email associated to your GCP account, which should be your GitLab email.\n\n### Run the project pipeline\n\n\nAfter configuring the second GitLab project, manually trigger the\nprovisioning of VS Code and its dependencies to the GCP VM as follows:\n\n\n1. On the left sidebar, select **Build > Pipelines** and click on the button\n**Run Pipeline**. On the next screen, click on the button **Run pipeline**.\n\n    The pipeline will:\n\n    - install `xauth` on the virtual machine. 
This is needed for effective X11 communication between your local desktop and the VM \n    - install `git` on the VM\n    - install `Visual Studio Code` on the VM.\n\n2. At this point, you can wait until the pipeline successfully completes. If\nyou don't want to wait, you can continue to do the first step of the next\nsection. However, you must ensure the pipeline has successfully completed\nbefore you can perform Step 2 of the next section.\n\n\n### Connect to your VM from your local Mac laptop\n\n\nNow that you have an instance of VS Code running on a Linux VM on GCP, you\nneed to configure your Mac laptop to be able to act as a client to the\nremote VM. Follow these steps:\n\n\n1. To connect to the remote VS Code from your Mac, you must first install\n`XQuartz` on your Mac. You can execute the following command on your Mac to\ninstall it:\n\n\n```\n\nbrew install xquartz\n\n```\n\nOr, you can follow the instructions from the following\n[tutorial](https://und.edu/research/computational-research-center/tutorials/mac-x11.html)\nfrom the University of North Dakota.\n\n\nAfter the pipeline for project `vscvmsetup` successfully executes to\ncompletion (pipeline you manually executed in the previous section), you can\nconnect to the remote VS Code as follows:\n\n\n2. Launch `XQuartz` on your Mac (it should be located in your Applications\nfolder). Its launching should open up an `xterm` on your Mac. If it does\nnot, then you can select **Applications > Terminal** from the `XQuartz` top\nmenu. \n\n3. On the `xterm`, enter the following command:\n\n\n```\n\ngcloud compute ssh --zone \"[GCP zone]\" \"[name of your VM]\" --project \"[GCP\nproject]\" --ssh-flag=\"-Y\"\n\n```\n\nWhere:\n\n\n- `[VM name]` is the name of the VM you created in project `gcpvmlnxvsc`.\nIts value should be the same as the `gcp_project` variable.\n\n- `[GCP zone]` is the zone where the VM is running. 
Its value should be the\nsame as the `gcp_vmname` variable.\n\n- `[GCP project]` is the name of your GCP project assigned name. Its value\nshould be the same as the `gcp_project` variable.\n\n\n***Note: If you have not installed the Google Cloud CLI, please do so by\nfollowing the [Google\ndocumentation](https://cloud.google.com/sdk/docs/install).***\n\n\n4. If you have not used SSH on your Mac before, you may not have a `.ssh` in\nyour `HOME` directory. If this is the case, you will be asked if you would\nlike to continue with the creation of this directory. Answer **Y**.\n\n\n5. Next, you will be asked to enter the same password twice to generate a\npublic/private key. Enter the same password you used when defining the\nvariable `vm_pwd` in the required configuration above.\n\n\n6. Once the SSH key is done propagating, you will need to enter the password\nagain two times to log in to the VM.\n\n\n7. You should now be logged in to the VM.\n\n\n### Create a personal access token\n\n\nThe assumption here is that you already have a GitLab project that you would\nwant to open from and work on the remote VS Code. To do this, you will need\nto clone your GitLab project from the VM. First, you will be using a\npersonal access token (PAT) to clone your project.\n\n\n1. Head over to your GitLab project (the one that you'd like to open from\nthe remote VS Code).\n\n2. From your GitLab project, create a\n[PAT](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html#create-a-personal-access-token),\nname it `pat-gcpvm` and ensure that it has the following scopes:\n`read_repository`, `write_repository`, `read_registry`, `write_registry`,\nand `ai_features`\n\n3. Save the generated PAT somewhere safe; you will need it later.\n\n\n### Clone the read_repository\n\n\n1. 
On your local Mac, from the `xterm` where you are logged on to the remote\nVM, enter the following command:\n\n\n```\n\ngit clone https://[your GitLab\nusername]:[personal_access_token]@gitlab.com/[GitLab project name].git \n\n```\n\n\nWhere:\n\n\n- `[your GitLab username]` is your GitLab handle.\n\n- `[personal_access_token]` is the PAT you created in the previous section.\n\n- `[GitLab project name]` is the name of the project that contains the\nGitLab Code Suggestions test cases.\n\n\n## Launch Visual Studio Code\n\n\n1. From the `xterm` where you are logged in to the VM, enter the following\ncommand:\n\n\n```\n\ncode\n\n```\n\n\nWait for a few seconds and Visual Studio Code will appear on your Mac\nscreen.\n\n\n2. From the VS Code menu, select **File > Open Folder...\"\n\n3. In the File chooser, select the top-level directory of the GitLab project\nyou cloned in the previous section\n\n\nThat's it! You're ready to start working on your cloned GitLab project using\nthe VS Code that you installed on a remote Linux-based VM.\n\n\n### Troubleshooting\n\n\nWhile using the remotely installed VS Code from your local Mac, you may\nencounter a few issues. In this section, we provide guidance on how to\nmitigate them.\n\n\n#### Keyboard keys not mapped correctly\n\n\nIf, while running VS Code, you are having issues with your keyboard keys not\nbeing mapped correctly, e.g. letter e is backspace, letter r is tab, letter\ns is clear line, etc., do the following:\n\n\n1. In VS Code, select **File > Preferences > Settings**.\n\n1. Search for \"keyboard\". If having issues with the letter e, then search\nfor \"board\". Click on the \"Keyboard\" entry under \"Application.\"\n\n1. Ensure that the Keyboard Dispatch is set to \"keyCode.\"\n\n1. Restart VS Code.\n\n1. 
If you need further help, this is a good resource for [keyboard\nproblems](https://github.com/microsoft/vscode/wiki/Keybinding-Issues#troubleshoot-linux-keybindings).\n\n\n#### Error loading webview: Error\n\n\nIf while running VS Code, you get a message saying:\n\n\n\"Error loading webview: Error: Could not register service worker:\nInvalidStateError: Failed to register a ServiceWorker: The document is in an\ninvalid state.\"\n\n\n1. Exit VS Code and then enter this cmd from the `xterm` window:\n\n\n`killall code`\n\n\nYou may need to execute this command two or three times in a row to kill all\nVS Code processes.\n\n\n2. Ensure that all VS Code-related processes are gone by entering the\nfollowing command from the `xterm` window:\n\n\n`ps -ef | grep code`\n\n\n3. Once all the VS Code-related processes are gone, restart VS Code by\nentering the following command from the `xterm` window:\n\n\n`code`\n\n\n#### Some useful commands to debug SSH\n\n\nHere are some useful commands to run on the VM that can help you debug SSH\nissues:\n\n\n1. To get the status, location and latest event of sshd:\n\n\n`sudo systemctl status ssh`\n\n\n2. To see the log of sshd:\n\n\n`journalctl -b -a -u ssh`\n\n\n3. To restart to SSH daemon:\n\n\n`sudo systemctl restart ssh.service`\n\n\nOr\n\n\n`sudo systemctl restart ssh`\n\n\n4. To start a root shell:\n\n\n`sudo -s`\n\n\n## Get started\n\n\nThis article described how to:\n\n- instantiate a Linux-based VM on GCP\n\n- install VS Code and dependencies on the remote VM\n\n- clone an existing GitLab project of yours in the remote VM\n\n- open your remotely cloned project from the remotely installed VS Code\n\n\nAs a result, you can basically use your laptop as a thin client that\naccesses a remote server, where all the work takes place.\n\n\n> The automation to get all these parts in place was done by GitLab. 
Sign up\nfor a [free GitLab Ultimate\ntrial](https://about.gitlab.com/free-trial/) to get started today!\n",[9,1146,731],{"slug":2577,"featured":91,"template":734},"tutorial-install-vs-code-on-a-cloud-provider-vm-and-set-up-remote-access","content:en-us:blog:tutorial-install-vs-code-on-a-cloud-provider-vm-and-set-up-remote-access.yml","Tutorial Install Vs Code On A Cloud Provider Vm And Set Up Remote Access","en-us/blog/tutorial-install-vs-code-on-a-cloud-provider-vm-and-set-up-remote-access.yml","en-us/blog/tutorial-install-vs-code-on-a-cloud-provider-vm-and-set-up-remote-access",{"_path":2583,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2584,"content":2590,"config":2596,"_id":2598,"_type":14,"title":2599,"_source":16,"_file":2600,"_stem":2601,"_extension":19},"/en-us/blog/understanding-kubernestes-rbac",{"title":2585,"description":2586,"ogTitle":2585,"ogDescription":2586,"noIndex":6,"ogImage":2587,"ogUrl":2588,"ogSiteName":720,"ogType":721,"canonicalUrls":2588,"schema":2589},"What you need to know about Kubernetes RBAC","Role-based access control is now default, and expected in most Kubernetes deployments. 
Here's the What, Why and How of RBAC.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678884/Blog/Hero%20Images/understanding-kubernetes-rbac-post-cover.jpg","https://about.gitlab.com/blog/understanding-kubernestes-rbac","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"What you need to know about Kubernetes RBAC\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Abubakar Siddiq Ango\"}],\n        \"datePublished\": \"2018-08-07\",\n      }",{"title":2585,"description":2586,"authors":2591,"heroImage":2587,"date":2593,"body":2594,"category":752,"tags":2595},[2592],"Abubakar Siddiq Ango","2018-08-07","Managing access to resources is an essential part of ensuring the\nreliability, security, and efficiency of any infrastructure, but can quickly\nget complicated to manage. With Kubernetes, attribute-based access control\n(ABAC) is very powerful but complex, while role-based access control (RBAC)\nmakes it easier to manage permissions using kubectl and the Kubernetes API\ndirectly. This post shares how to get started with RBAC and some best\npractices to adopt.\n\n\n## RBAC vs ABAC\n\n\nRBAC made beta [release with Kubernetes\n1.6](https://kubernetes.io/blog/2017/04/rbac-support-in-kubernetes/) and\ngeneral availability [with\n1.8](https://kubernetes.io/blog/2017/10/using-rbac-generally-available-18/).\nA fundamental building block of Kubernetes, RBAC is an authorization\nmechanism for controlling how the Kubernetes API is accessed using\npermissions.\n\n\nRBAC is now preferred over ABAC, which is difficult to manage and\nunderstand. 
ABAC also requires SSH and root access to make authorization\npolicy changes.\n\n\nResource management can be delegated using RBAC without giving away SSH\naccess to the Cluster Master VM and permission policies can be configured\nusing kubectl or the Kubernetes API itself.\n\n\n## RBAC resources\n\n\nUsing RBAC, Authorizations can be given using a set of permissions that can\nbe limited within a namespace or the entire cluster. To do this, you can\ndefine A set of permission is called a Role, which is defined within a\nnamespace. If you want A role that is cluster-wide, this is defined as a\nClusterRole.\n\n\nBelow, you can see an example of a role definition:\n\n\n### Role\n\n\n```\n\nkind: Role\n\napiVersion: rbac.authorization.k8s.io/v1\n\nmetadata:\n  namespace: default\n  name: pod-reader\nrules:\n\n- apiGroups: [\"\"] # \"\" indicates the core API group\n  resources: [\"pods\"]\n  verbs: [\"get\", \"watch\", \"list\"]\n```\n\n\nLike other Kubernetes resources, a role definition contains kind,\napiVersion, and metadata, but with the addition of rules.\n\n\nFor the rules key, you will define how your permissions will work. You can\nspecify what resources within apiGroup(s) are permitted and how they can be\naccessed using verbs (including `create`, `delete`, `deletecollection`,\n`get`, `list`, `patch`, `update`, and `watch`). The apiGroups key defines\nthe location in the API where the resources are found. 
If you provide an\nempty value in this list, it means the core API group.\n\n\n### ClusterRole\n\n```\n\nkind: ClusterRole\n\napiVersion: rbac.authorization.k8s.io/v1\n\nmetadata:\n  # \"namespace\" omitted since ClusterRoles are not namespaced\n  name: secret-reader\nrules:\n\n- apiGroups: [\"\"]\n  resources: [\"secrets\"]\n  verbs: [\"get\", \"watch\", \"list\"]\n```\n\n\nThe major difference in the definition for a `ClusterRole` is the absence of\na namespace, because the permissions defined here are cluster-scoped.\nHowever, when referenced by a `RoleBinding`, a `ClusterRole` can be used to\ngrant permissions to namespaced resources defined in the `ClusterRole` role\nwithin the `RoleBinding`’s namespace.\n\n\n### RoleBinding and ClusterRoleBinding\n\n\nA RoleBinding allows you to associate a Role with a user or list of users.\nThis grants the Role permissions to the users. The user(s) are defined under\nsubjects, and the Role association under role references (roleRef). For\nexample:\n\n\n#### RoleBinding:\n\n\n```\n\nkind: RoleBinding\n\napiVersion: rbac.authorization.k8s.io/v1\n\nmetadata:\n  name: read-pods\n  namespace: default\nsubjects:\n\n- kind: User\n  name: abu\n  apiGroup: rbac.authorization.k8s.io\nroleRef:\n  kind: Role\n  name: pod-reader\n  apiGroup: rbac.authorization.k8s.io\n```\n\n\n#### ClusterRoleBinding:\n\n\n```\n\nkind: ClusterRoleBinding\n\napiVersion: rbac.authorization.k8s.io/v1\n\nmetadata:\n  name: read-secrets-global\nsubjects:\n\n- kind: Group\n  name: manager\n  apiGroup: rbac.authorization.k8s.io\nroleRef:\n  kind: ClusterRole\n  name: secret-reader\n  apiGroup: rbac.authorization.k8s.io\n```\n\n\n## Best practices\n\n\nApplying the principle of [least\nprivileges](https://medium.com/@haim_50405/establish-least-privileged-best-practice-for-your-kubernetes-clusters-f0785e1aee39)\nis crucial, as it reduces exposure and vulnerability. 
A few of the essential\nbest practices include:\n\n\n- Be specific with the resources you are granting access to and the verbs\nbeing used; avoid wild cards\n\n- Use Roles instead of Cluster Roles where possible\n\n- Only give permissions required for the specific tasks to be performed by a\nuser and nothing more\n\n- Create and use service accounts for processes and services like\n[Tiller](https://docs.helm.sh/rbac#tiller-and-role-based-access-control)\nthat require permission instead of using the default service accounts\n\n\n## GitLab + RBAC\n\n\nCurrently, integrating GitLab with a Kubernetes cluster with RBAC enabled is\nnot supported. You will need to enable and use the legacy ABAC mechanism\n([see the documentation\nhere](https://docs.gitlab.com/ee/user/project/clusters/index.html#security-implications)).\nRBAC will be supported in [a future\nrelease](https://gitlab.com/gitlab-org/gitlab-ce/issues/29398). This affects\nGitLab.com and all self-managed versions of GitLab.\n\n\n## Learn more\n\n\n- [Controlling\naccess](https://kubernetes.io/docs/reference/access-authn-authz/controlling-access/)\n\n-\n[Authorization](https://kubernetes.io/docs/reference/access-authn-authz/authorization/)\n\n- [RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/)\n\n- [RBAC and TLS\ncertificates](https://sysdig.com/blog/kubernetes-security-rbac-tls/)\n",[859,9],{"slug":2597,"featured":6,"template":734},"understanding-kubernestes-rbac","content:en-us:blog:understanding-kubernestes-rbac.yml","Understanding Kubernestes 
Rbac","en-us/blog/understanding-kubernestes-rbac.yml","en-us/blog/understanding-kubernestes-rbac",{"_path":2603,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2604,"content":2610,"config":2615,"_id":2617,"_type":14,"title":2618,"_source":16,"_file":2619,"_stem":2620,"_extension":19},"/en-us/blog/use-waypoint-to-deploy-with-gitlab-cicd",{"title":2605,"description":2606,"ogTitle":2605,"ogDescription":2606,"noIndex":6,"ogImage":2607,"ogUrl":2608,"ogSiteName":720,"ogType":721,"canonicalUrls":2608,"schema":2609},"How to use HashiCorp Waypoint to deploy with GitLab CI/CD","Learn how to use Waypoint using GitLab CI/CD by following this step-by-step demo.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679260/Blog/Hero%20Images/using-hashicorp-waypoint-deploy-gitlab-cicd.jpg","https://about.gitlab.com/blog/use-waypoint-to-deploy-with-gitlab-cicd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use HashiCorp Waypoint to deploy with GitLab CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brendan O'Leary\"}],\n        \"datePublished\": \"2020-10-15\",\n      }",{"title":2605,"description":2606,"authors":2611,"heroImage":2607,"date":2612,"body":2613,"category":1250,"tags":2614},[975],"2020-10-15","\n\nHashiCorp announced a new project at [HashiConf Digital](https://hashiconf.com/) called [Waypoint](https://www.waypointproject.io/). \n\n## Hashicorp Waypoint\n\nHashicorp Waypoint uses an HCL based configuration file to describe how to build, deploy, and release applications to various cloud platforms, ranging from Kubernetes to AWS to Google Cloud Run. Think of Waypoint as if Terraform and Vagrant came together to describe how to build, deploy, and release your applications.\n\nTrue to form, Hashicorp released Waypoint as open source and with a lot of examples. 
The orchestration layer is up to you – Waypoint ships as a binary you can run right on your laptop or from whatever CI/CD orchestration tool you choose. Where you deploy is up to you as well since Waypoint shipped with support for Kubernetes, Docker, Google Cloud Run, AWS ECS, and a few others.\n\n## Benefits of Hashicorp Waypoint\n\nHashicorp Waypoint is an open-source developer workflow that can run from any laptop or CI/CD tool. Deployment is also easier because Hashicorp ships to several platforms like Kubernetes, AWS, and more. \n\nWhen using Hashicorp to build, deploy, and release applications, there are several features to keep in mind:\n\n* Waypoint provides a number of workflow examples as guides.\n\n* Build, deploy, and release your application with the single command of “waypoint up.”\n\n* Execute commands in a deployed application just as easily using “waypoint exec.”\n\n* Get a real-time look at application logs to help to debug quickly when necessary.\n\n## Orchestrating Waypoint using GitLab CI/CD\n\nUsing the fantastic [Waypoint documentation](https://www.waypointproject.io/docs) and the excellent [example applications](https://github.com/hashicorp/waypoint-examples) that HashiCorp provided, we decided to take a look at orchestrating Waypoint using [GitLab CI/CD](/topics/ci-cd/). To do this, we’ll start from the simple [AWS ECS Node.js app](https://github.com/hashicorp/waypoint-examples/tree/main/aws-ecs/nodejs) from the example repository.\n\nAfter cloning, we can see the structure of a Node.js application that displays a single page.\n\n![Folder structure of the Waypoint example and the page it produces](https://about.gitlab.com/images/blogimages/waypoint-example.png)\n\nYou’ll see that Dockerfile is missing from that project. There isn’t one included in the example, and we actually won’t need one because Waypoint is going to take care of that for us. 
Take a closer look at the `waypoint.hcl` file to see what it will do.\n\n```hcl\nproject = \"example-nodejs\"\n\napp \"example-nodejs\" {\n  labels = {\n\t\"service\" = \"example-nodejs\",\n\t\"env\" = \"dev\"\n  }\n\n  build {\n\tuse \"pack\" {}\n\tregistry {\n  \tuse \"aws-ecr\" {\n    \tregion = \"us-east-1\"\n    \trepository = \"waypoint-gitlab\"\n    \ttag = \"latest\"\n  \t}\n\t}\n  }\n\n  deploy {\n\tuse \"aws-ecs\" {\n  \tregion = \"us-east-1\"\n  \tmemory = \"512\"\n\t}\n  }\n}\n```\n\nIn the build step, Waypoint uses [Cloud Native Buildpacks (CNB)](https://buildpacks.io/) to detect the language of the project and create a Docker image without any Dockerfile. This is actually the same technology that GitLab uses as part of [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) in the Auto Build step. We’re excited to see CNB from the CNCF get more adoption by users in the industry.\n\nOnce that image is built, Waypoint will automatically push the image to our AWS ECR registry to get it ready for the deploy. Once the build has completed, the deploy step uses the [AWS ECS plugin](https://www.waypointproject.io/plugins/aws-ecs) to deploy our application to our AWS account.\n\nFrom my laptop, that’s easy. I can have Waypoint installed, be already authenticated to my AWS account, and it \"just works\". But what if I want to expand this beyond my laptop? And what if I want to automate this deployment as part of my overall CI/CD pipeline where all of my current unit, security, and other tests run today? That’s where GitLab CI/CD comes in!\n\n## Waypoint in GitLab CI/CD\n\nTo orchestrate all of this in GitLab CI/CD, let’s take a look at what we’ll need for our `.gitlab-ci.yml` file:\n\n1. First, we’ll need a base image to run inside of. Waypoint works on any Linux distribution and just needs Docker to run, so we can start from a generic Docker image.\n1. Next, we’ll install Waypoint to that image. 
In the future, we could build a [meta build image](/blog/building-build-images/) to containerize this process for us.\n1. Finally, we’ll run the Waypoint commands.\n\nAbove is all we’ll need for our pipeline to run the scripts required to get the deploy done, but we will need one more thing in order to deploy to AWS: We’ll have to authenticate to our AWS account. On [Waypoint’s roadmap](https://www.waypointproject.io/docs/roadmap), there are some mentions of plans around authentication and authorization. HashiCorp also released an exciting project in this space this week, [Boundary](https://www.boundaryproject.io/). But for now, we can handle authentication and authorization ourselves relatively simply.\n\nTo authenticate GitLab CI/CD with AWS, there are a few options. The first option is to use GitLab’s integration with [HashiCorp Vault](https://www.vaultproject.io/) if your team is already using Vault for credential management. Alternatively, if your team manages authorization through AWS IAM, you can ensure that the deploy job runs on a [GitLab runner](https://docs.gitlab.com/runner/) that is authorized to run the deployment with IAM. But if you’re just getting started with Waypoint and want to get going quickly, the final option is to add your AWS API Key and Secret Key as a [GitLab CI/CD variable](https://docs.gitlab.com/ee/ci/variables/) named `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`.\n\n## Putting it all together with Waypoint\n\nOnce the authentication is handled, we’re ready to go! Our final `.gitlab-ci.yml` looks like this:\n\n```yml\nwaypoint:\n  image: docker:latest\n  stage: build\n  services:\n    - docker:dind\n  # Define environment variables, e.g. 
`WAYPOINT_VERSION: '0.1.1'`\n  variables:\n    WAYPOINT_VERSION: ''\n    WAYPOINT_SERVER_ADDR: ''\n    WAYPOINT_SERVER_TOKEN: ''\n    WAYPOINT_SERVER_TLS: '1'\n    WAYPOINT_SERVER_TLS_SKIP_VERIFY: '1'\n  script:\n    - wget -q -O /tmp/waypoint.zip https://releases.hashicorp.com/waypoint/${WAYPOINT_VERSION}/waypoint_${WAYPOINT_VERSION}_linux_amd64.zip\n    - unzip -d /usr/local/bin /tmp/waypoint.zip\n    - rm -rf /tmp/waypoint*\n    - waypoint init\n    - waypoint build\n    - waypoint deploy\n    - waypoint release\n```\n\nYou can see that we start from the generic `docker:latest` image and set up some variables required by Waypoint. In the `script` section, we grab the latest Waypoint binary and install it to our local bin. Since our runner is already authorized with AWS, it’s as simple as running `waypoint init`, `build`, `deploy`, and `release`.\n\nThe output of the build job shows us the endpoint we’re deploying to:\n\n![Folder structure of the Waypoint example and the page it produces](https://about.gitlab.com/images/blogimages/waypoint-job-output.png)\n\nWaypoint is one of multiple [HashiCorp solutions that GitLab works great with](/partners/technology-partners/hashicorp/). For example, in addition to application delivery, we could orchestrate the underlying infrastructure with [Terraform through GiLab](https://docs.gitlab.com/ee/user/infrastructure/) as well. To standardize security in the SDLC, we could also integrate [GitLab with Vault](https://docs.gitlab.com/ee/ci/examples/authenticating-with-hashicorp-vault/) to manage secrets and tokens within CI/CD pipelines that provides consistency for developers and operators relying on secrets management during development testing as well as in production use.\n\nThe joint solutions developed by HashiCorp and GitLab are helping organizations find a better way for application development, and keeping delivery, and infrastructure management workflows in lock step. 
Waypoint is just another step in the right direction and we’re excited to see where the project goes from here. \n\n## Getting started with Hashicorp Waypoint\n\nYou can learn more about Waypoint at [waypointproject.io](https://www.waypointproject.io/). Also check out their [documentation](https://www.waypointproject.io/docs) and [roadmap](https://www.waypointproject.io/docs/roadmap) for the project. We have [contributed](https://github.com/hashicorp/waypoint/pull/492) everything we learned to the [GitLab CI/CD integration docs](https://www.waypointproject.io/docs/automating-execution/gitlab-cicd). You can also find a full working GitLab example in [this repository](https://gitlab.com/brendan-demo/waypoint) if you want to try it for yourself!\n",[9,563],{"slug":2616,"featured":6,"template":734},"use-waypoint-to-deploy-with-gitlab-cicd","content:en-us:blog:use-waypoint-to-deploy-with-gitlab-cicd.yml","Use Waypoint To Deploy With Gitlab Cicd","en-us/blog/use-waypoint-to-deploy-with-gitlab-cicd.yml","en-us/blog/use-waypoint-to-deploy-with-gitlab-cicd",{"_path":2622,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2623,"content":2628,"config":2634,"_id":2636,"_type":14,"title":2637,"_source":16,"_file":2638,"_stem":2639,"_extension":19},"/en-us/blog/utilize-the-gitlab-devops-platform-to-avoid-cloud-migration-hazards",{"title":2624,"description":2625,"ogTitle":2624,"ogDescription":2625,"noIndex":6,"ogImage":718,"ogUrl":2626,"ogSiteName":720,"ogType":721,"canonicalUrls":2626,"schema":2627},"Utilize the GitLab DevOps platform to avoid cloud migration hazards","The GitLab modern DevOps platform can simplify and accelerate planning, managing, moving, and modernizing applications and infrastructure as companies adopt a cloud-first posture on AWS and Google Cloud.","https://about.gitlab.com/blog/utilize-the-gitlab-devops-platform-to-avoid-cloud-migration-hazards","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n 
       \"headline\": \"Utilize the GitLab DevOps platform to avoid cloud migration hazards\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Nima Badiey\"}],\n        \"datePublished\": \"2022-01-25\",\n      }",{"title":2624,"description":2625,"authors":2629,"heroImage":718,"date":2631,"body":2632,"category":576,"tags":2633},[2630],"Nima Badiey","2022-01-25","\nThese unprecedented times have been an unexpected catalyst driving companies to finally get serious about moving to the cloud. The adoption wave started in retail and banking by consumers who were unable to shop and bank in-person and were forced instead to drastically increase their online purchases.\n\nAs a result, many e-commerce sites hosted on public clouds experienced a Cambrian explosion of activity and business. The impact of the pandemic soon crossed every industry and segment from healthcare and education to hospitality and food services, as more and more companies closed their offices in favor of remote work. With closed buildings came closed data centers and other short-staffing of business-critical services.\n\nCoupled with supply chain disruptions of compute, networking, and storage gear, many IT teams were faced with mounting business continuity challenges, which impacted service level agreements, product quality, and ultimately customer satisfaction.\n\nThe answer to these challenges is to move applications, data, and infrastructure from on-premises to the cloud, with hosting provided by large public cloud providers like Amazon Web Services (AWS) and Google Cloud – both of which are better suited to support business-critical services. \n\nAs businesses continue to define their new processes and procedures, one condition is likely to become permanent: Cloud adoption is expected to accelerate and spread across all industries. 
[IDC FutureScape](https://www.businesswire.com/news/home/20191029005144/en/IDC-FutureScape-Outlines-the-Impact-Digital-Supremacy-Will-Have-on-Enterprise-Transformation-and-the-IT-Industry) predicts that by 2024 more than 50% of all IT spending will go toward digital transformation and cloud-first innovation projects.\n\nDespite this immutable momentum, many CIOs remain reticent as 80% are still concerned that cloud adoption initiatives alone won’t deliver the expected business agility they need, according to [a McKinsey report](https://www.mckinsey.com/business-functions/mckinsey-digital/our-insights/unlocking-business-acceleration-in-a-hybrid-cloud-world).\n\nOne reason for this is that migrating and modernizing applications simultaneously to the cloud takes more effort and experience than organizations can afford. To be successful, organizations need to adopt new software development strategies and DevOps tools to support hybrid and multi-cloud models. These teams often lack the consistent methodology and toolchains to plan, prioritize, automate, and track the progress of cloud migration projects. Adding to the risks, many companies are hampered with legacy software development workflows, disconnected processes, and siloed tools. They are further burdened with a complicated inventory of mismatched legacy hardware, aging networks, security, and application stacks that are poorly suited to cloud-native architectures.\n\nUltimately, successful cloud migrations require mastering the basics by adopting proven, repeatable, and reliable processes such as breaking big initiatives into manageable workstreams. Consistency and structured repeatability have a greater impact on project success than executive sponsorship, funding, or upgrading the company culture to an “agile” mindset. GitLab plays a critical role in the successful deployment and delivery of these cloud migration projects. 
\n\n## DevOps: The first logical step in cloud adoption\n\nGitLab is a modern DevOps platform used by startups as well as midsize and Fortune 500 companies to build and deliver software through an integrated toolset. In simple terms, it’s Git for source code management with a built-in CI/CD pipeline that includes security, code scanning, and monitoring. GitLab is an all-in-one integrated platform. No need to digitally piece multiple solutions together and no more switching between different tools and apps just to deploy software code. \n\nAs enterprises plan to migrate apps, services, data, and/or infrastructure to the cloud this year, these projects will benefit from new ways to plan, manage, and deliver value from their cloud investments.\n\nTo get started, GitLab, together with AWS and Google Cloud, has chronicled this journey with valuable guidance to help cloud teams embrace the cultural shift necessary for modern agile teams. In these guides, we map out an approach that empowers cross-functional teams to work together concurrently during migrations, refactorization, and adoption of new cloud services.\n\nWith GitLab, users can define custom assessment methodologies, create repeatable task lists for application migration, store app code and Terraform configuration scripts in Git, and set security protocols easily through simple merge requests. GitLab can also automate the process of testing, scanning, monitoring, and deploying business apps. By embracing next-gen DevOps, cloud migration projects can be more successful with proven, repeatable, and reliable processes all managed on the GitLab DevOps platform. 
\n\n### Learn more:\n- [Migration to Google Cloud and adopting cloud native](https://learn.gitlab.com/gitlab-google-cloud)\n- [Accelerate your migration to AWS using a DevOps model](https://learn.gitlab.com/gitlab-aws-microsite)\n\n",[563,9,1147],{"slug":2635,"featured":6,"template":734},"utilize-the-gitlab-devops-platform-to-avoid-cloud-migration-hazards","content:en-us:blog:utilize-the-gitlab-devops-platform-to-avoid-cloud-migration-hazards.yml","Utilize The Gitlab Devops Platform To Avoid Cloud Migration Hazards","en-us/blog/utilize-the-gitlab-devops-platform-to-avoid-cloud-migration-hazards.yml","en-us/blog/utilize-the-gitlab-devops-platform-to-avoid-cloud-migration-hazards",{"_path":2641,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2642,"content":2648,"config":2653,"_id":2655,"_type":14,"title":2656,"_source":16,"_file":2657,"_stem":2658,"_extension":19},"/en-us/blog/we-are-building-a-better-heroku",{"title":2643,"description":2644,"ogTitle":2643,"ogDescription":2644,"noIndex":6,"ogImage":2645,"ogUrl":2646,"ogSiteName":720,"ogType":721,"canonicalUrls":2646,"schema":2647},"We are very far from a better Heroku for production apps in a hyper cloud","GitLab is building Heroku for production apps in hyper clouds, integrated into your DevSecOps workflow: The 5 minute production app.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672405/Blog/Hero%20Images/spacex-unsplash.jpg","https://about.gitlab.com/blog/we-are-building-a-better-heroku","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"We are very far from a better Heroku for production apps in a hyper cloud\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2021-03-22\",\n      }",{"title":2643,"description":2644,"authors":2649,"heroImage":2645,"date":2650,"body":2651,"category":1122,"tags":2652},[1099],"2021-03-22","\n\n{::options 
parse_block_html=\"true\" /}\n\n\n\n> Update: This post does not live up to its original title `We are building a better Heroku`. It shows my own personal experience and reflects poorly on competitors. I am sorry about that.\n>\n> It should have emphasized the _building_ part, we're just starting. The current 5 minute production app doesn't hold a candle to Heroku at the moment.\n> It should have made it clear the goals is to improve the speed with which you can configure a production app, not a development app. Development apps on Heroku are already close to perfect. The examples in this post are contrived since it talks about a development app, as [rightly called out by Heroku people](https://twitter.com/johnbeynon/status/1374306499426652161).\n> It should have gone into [why hyper clouds might be preferable](https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template#hypercloud).\n> It should have talked about state, we made a small improvement in [this MR](https://gitlab.com/gitlab-com/www-gitlab-com/-/merge_requests/78028/diffs) but we should have done the [planned work](https://gitlab.com/gitlab-com/www-gitlab-com/-/issues/11137) and made one post out of it.\n>\n> We are very far from a better Heroku for production apps in a hyper cloud.\n\nCreating a web application has become very convenient and easy. You’ll start in your local development environment, run a dev server and verify the changes looking good. At a certain point, you want to share it with your friends on the internet. A service or server?\n\n### Use Heroku\n\nI have been a backend developer in the past 20 years. Web development is often fighting with Javascript and CSS. 
Especially Heroku as a deployment platform is a new area for me.\n\nLet's start with creating an account, login, and follow the web instructions to create a new app in the [documentation](https://devcenter.heroku.com/).\n\nLet’s try a fun demo, a battleship game to learn Javascript on the client and NodeJS on the server.\n\n```\n$ cd ~/dev/opensource\n$ git clone https://github.com/kubowania/battleships\n$ cd battleships\n```\n\nTest it locally, optional.\n\n```\n$ npm install\n$ npm start\n```\n\nInstall the Heroku CLI, on [macOS with Homebrew](/blog/dotfiles-document-and-automate-your-macbook-setup/).\n\n```\n$ brew install heroku/brew/heroku\n\n$ heroku autocomplete\n```\n\nThis opens a new browser window to login. Lets create an app.\n\n```\n$ heroku create\nCreating app... done, ⬢ nameless-mountain-48655\nhttps://nameless-mountain-48655.herokuapp.com/ | https://git.heroku.com/nameless-mountain-48655.git\n```\n\nThe CLI command adds a new Git remote called `heroku` where we need to push into.\n\n```\n$ git push heroku main\n\nremote: -----> Launching...\nremote:        Released v3\nremote:        https://nameless-mountain-48655.herokuapp.com/ deployed to Heroku\nremote:\nremote: Verifying deploy... done.\n```\n\nDeployed in less than 5 minutes. Getting there and installing the pre-requisites on the CLI took longer than expected.\n\n![Battleship web app deployed with Heroku](https://about.gitlab.com/images/blogimages/better-heroku-5min-prod-app/battleship_heroku.png){: .shadow.medium.center}\n\nLots of CLI commands involved, and it did not run in a CI/CD pipeline with additional tests before deploying it. Now the web application is deployed into a black box. Want to use Let’s Encrypt and your own domain name? How about adding the deployment natively to GitLab to have a single application in your DevOps workflow?\n\n#### Setting up Persistence with Heroku\n\nThis gets more challenging. 
Imagine that your app uses a relational database, a caching layer and object storage. This requires lots of CLI commands and a deep dive into the application configuration. We did not touch persistent backends in the demo app above yet.\n\nHeroku offers [PostgreSQL](https://devcenter.heroku.com/categories/postgres-basics), [Redis](https://devcenter.heroku.com/categories/heroku-redis) and [AWS S3](https://devcenter.heroku.com/articles/s3).\n\n```\nheroku addons:create heroku-postgresql:hobby-dev\nheroku addons:create heroku-postgresql:hobby-dev --version=10\n\nheroku pg:promote HEROKU_POSTGRESQL_YELLOW\n```\n\n```\nheroku addons:create heroku-redis:hobby-dev -a 5-min-prod-app\n```\n\nNote that the default `hobby-dev` plan allows unencrypted connections too.\n\n```\nheroku config:set S3_BUCKET_NAME=appname-assets\nheroku config:set AWS_ACCESS_KEY_ID=xxx AWS_SECRET_ACCESS_KEY=yyy\n```\n\nAll stateful backends in Heroku need to be secured. This requires more commands to create self-signed certificates and encrypt transport layers in the backend.\n\nAfter all, is there a better way to automate requesting stateful backend services and automate their provisioning?\n\n### A better Heroku: The 5 minute production app\n\n{::options parse_block_html=\"false\" /}\n\n\u003Cdiv class=\"center\">\n\n\u003Cblockquote class=\"twitter-tweet\">\u003Cp lang=\"en\" dir=\"ltr\">the modern tech industry is basically folks just endlessly remaking remakes of heroku\u003C/p>&mdash; Always Miso (@monkchips) \u003Ca href=\"https://twitter.com/monkchips/status/1368924845740810249?ref_src=twsrc%5Etfw\">March 8, 2021\u003C/a>\u003C/blockquote> \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003Cblockquote class=\"twitter-tweet\">\u003Cp lang=\"en\" dir=\"ltr\">Truth \u003Ca href=\"https://t.co/AFN9anBbQG\">https://t.co/AFN9anBbQG\u003C/a>\u003C/p>&mdash; Sid Sijbrandij (@sytses) \u003Ca 
href=\"https://twitter.com/sytses/status/1368982067229253632?ref_src=twsrc%5Etfw\">March 8, 2021\u003C/a>\u003C/blockquote> \u003Cscript async src=\"https://platform.twitter.com/widgets.js\" charset=\"utf-8\">\u003C/script>\n\n\u003C/div>\n\n\nCloud resources are cheap. AWS offers a free tier, HashiCorp Terraform has become an excellent tool to manage multi-cloud resources and GitLab integrates app packaging, container registry, deployment and TLS certificates.\n\nThere’s more application goodies: Provision a PostgreSQL VM, add Redis, SMTP email transport, custom domains with Let’s Encrypt.\n\n#### Use the 5 minute production app\n\nThe [documentation](https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template#usage) says to create a new AWS IAM role with credentials for automation.\n\nThe second step is to have the source code available in a GitLab project. You can use `New project > Import project > Repo by URL` to automatically import the GitHub repository `https://github.com/kubowania/battleships.git`.\n\n![Import the GitHub repository into GitLab](https://about.gitlab.com/images/blogimages/better-heroku-5min-prod-app/gitlab_new_project_import_github_url.png){: .shadow.medium.center}\n\nOnce imported, navigate into `Settings > CI/CD > Variables` to specify the AWS credentials and region. Ensure to tick the `Masked` checkbox to hide them in all job logs.\n\n![Configure AWS credentials as masked CI/CD variables](https://about.gitlab.com/images/blogimages/better-heroku-5min-prod-app/gitlab_5minprodapp_aws_cicd_variables.png){: .shadow.medium.center}\n\nNavigate back into the project overview. Click the `Setup CI/CD` button or open the Web IDE to create a new `.gitlab-ci.yml` file. 
Add the remote CI/CD template include like this:\n\n```\nvariables:\n    TF_VAR_DISABLE_POSTGRES: \"true\"\n    TF_VAR_DISABLE_REDIS: \"true\"\n\ninclude:\n  remote: https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template/-/raw/stable/deploy.yml\n```\n\nThe battleship application does not need the PostgreSQL and Redis backends. They are disabled with setting `TF_VAR_DISABLE_POSTGRES` and `TF_VAR_DISABLE_REDIS` [variables](https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template/-/blob/master/VARIABLES.md) to `false`.\n\nCommit the change to the default branch.\n\n8:43pm CET: Pipeline started with the build job. 2 min 33 sec.\n\n![GitLab pipeline builds the Docker image with Auto-Build](https://about.gitlab.com/images/blogimages/better-heroku-5min-prod-app/gitlab_5minprodapp_pipeline_01.png){: .shadow.medium.center}\n\n8:45pm CET: Pipeline runs terraform_apply to provision AWS resources in 2min 47 sec.\n\n![GitLab pipeline runs Terraform to provision cloud resources in AWS](https://about.gitlab.com/images/blogimages/better-heroku-5min-prod-app/gitlab_5minprodapp_pipeline_02.png){: .shadow.medium.center}\n\n8:48pm CET: Deployed in 1 min 11 sec.\n\nThe deploy job log greets with the URL in ~5 minutes, including a Lets Encrypt TLS certificate. There we go, let’s play some battleship!\n\n![Battleship web app deployed in AWS with the 5 minute production app](https://about.gitlab.com/images/blogimages/better-heroku-5min-prod-app/battleship_5minprodapp_aws.png){: .shadow.medium.center}\n\nNote that we never left the browser and there is no CLI involved. Next to the included template, there’s also room for adding more CI tests and security best practices while hacking on this project. You can navigate into your AWS console for debugging and troubleshooting and plan with production budgets, where needed.\n\n#### Setting up Persistence with the 5 Minute Production App\n\nRemember the stateful backends with Heroku above? 
By default, the 5 minute production app takes care of provisioning:\n\n- PostgreSQL server and secured backend\n- Redis cluster\n- S3 object storage in AWS\n\nThe 5 minute production app uses the managed stateful services of a hypercloud so your data is persisted and secure. By leveraging these managed services (databases, caching, objects storage, etc.) you have less to maintain. Everything is provisioned through Terraform which has the following advantages:\n\n- Terraform is the most popular IaC tool.\n- Terraform works accross platforms.\n- Terraform is well-documented.\n- Terraform state can be [stored and viewed in GitLab](https://docs.gitlab.com/ee/user/infrastructure/#gitlab-managed-terraform-state).\n- You avoid the cost and complexity of Kubernetes.\n- You have complete control to customize and extend.\n\nWe will explore more stateful backends in future apps and blog posts.\n\n### 5 minute production app + DevSecOps = ❤️\n\nExample for [Dependency Scanning](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/) and [SAST](https://docs.gitlab.com/ee/user/application_security/sast/analyzers.html):\n\n```\ninclude:\n  - remote: https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template/-/raw/stable/deploy.yml\n  - template: Dependency-Scanning.gitlab-ci.yml\n  - template: Security/SAST.gitlab-ci.yml\n```\n\n### More to use: Database backends, TLS, environments\n\nThis blog post covers the basic learning steps with Heroku and the 5 minute production app. A typical web app requires a database, storage or caching backend, which can get complicated to run with Heroku. We will explore the setup and production experience in future blog posts. 
In addition to backends, we will also look into TLS certificates and production environments in CD workflows.\n\nMeanwhile, try the 5 min production app yourself:\n\n* [5 minute production app docs](https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template#the-5-minute-production-app)\n* [Example projects](https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template#examples)\n* Your own future web app with [your custom domain](https://gitlab.com/gitlab-org/5-minute-production-app/deploy-template#custom-domain)?\n\nCover image by [SpaceX](https://unsplash.com/@spacex) on [Unsplash](https://unsplash.com/photos/OHOU-5UVIYQ)\n\n",[9,563,1062],{"slug":2654,"featured":6,"template":734},"we-are-building-a-better-heroku","content:en-us:blog:we-are-building-a-better-heroku.yml","We Are Building A Better Heroku","en-us/blog/we-are-building-a-better-heroku.yml","en-us/blog/we-are-building-a-better-heroku",{"_path":2660,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2661,"content":2666,"config":2671,"_id":2673,"_type":14,"title":2674,"_source":16,"_file":2675,"_stem":2676,"_extension":19},"/en-us/blog/what-is-cloud-native",{"title":2662,"description":2663,"ogTitle":2662,"ogDescription":2663,"noIndex":6,"ogImage":718,"ogUrl":2664,"ogSiteName":720,"ogType":721,"canonicalUrls":2664,"schema":2665},"A beginner's guide to cloud native","If you’re a little fuzzy on what makes an application cloud native, this explainer will help you get up to speed.","https://about.gitlab.com/blog/what-is-cloud-native","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A beginner's guide to cloud native\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Aricka Flowers\"}],\n        \"datePublished\": \"2018-10-08\",\n      }",{"title":2662,"description":2663,"authors":2667,"heroImage":718,"date":2668,"body":2669,"category":815,"tags":2670},[2022],"2018-10-08","\n\n## What is cloud 
native? Everything you need to know\n\nThe term [cloud native](/topics/cloud-native/) has been bandied about in the tech world a lot over the last few years, but it's still often misunderstood. Although it's an important part, simply being run in the cloud does not make an application cloud native; it must also be built in the cloud. One of the first and currently largest cloud computing providers, Amazon Web Services paved the way for cloud native app development, a now more than [$20 billion market](https://www.canalys.com/newsroom/cloud-infrastructure-spend-reaches-us%2420-billion-in-q2-2018-with-hybrid-it-approach-dominant) as of this year. With its growing popularity, organizations like the Cloud Native Computing Foundation (CNCF) have sprouted to help foster the growth of cloud native app development.\n\n[CNCF](https://www.cncf.io/), an open source software organization focused on promoting the cloud-based app building and deployment approach, [defines cloud native](https://github.com/cncf/toc/blob/master/DEFINITION.md) as the following:\n\n>Cloud native technologies empower organizations to build and run scalable applications in modern, dynamic environments such as public, private, and hybrid clouds.\n>\n>Containers, service meshes, [microservices](/topics/microservices/), immutable infrastructure, and declarative APIs exemplify this approach. These techniques enable loosely coupled systems that are resilient, manageable, and observable. Combined with robust automation, they allow engineers to make high-impact changes frequently and predictably with minimal toil.\n\nTo break it down even further: For an application to be cloud native, it must be built and run in the cloud. 
This requires multiple tools that allow app developers to make use of the architectural advantages of cloud infrastructure.\n\n## There are three main building blocks of cloud native architecture\n\n### Containers\n\n[Containers](/blog/containers-kubernetes-basics/) are an [alternative way to package applications](https://searchitoperations.techtarget.com/tip/What-are-containers-and-how-do-they-work) versus building for VMs or physical servers directly. Containers can run inside of a virtual machine or on a physical server. Containers hold an application’s libraries and processes, but don't include an operating system, making them lightweight. In the end, fewer servers are needed to run multiple instances of an application which reduces cost and makes them easier to scale. Some other [benefits of containers](https://tsa.com/top-5-benefits-of-containerization/) include faster deployment, better portability and scalability, and improved security.\n\n### Orchestrators\n\nOnce the containers are set, an orchestrator is needed to get them running. Container orchestrators direct how and where containers run, fix any that go down and determine if more are needed. When it comes to container orchestrators, also known as schedulers, Kubernetes is the [clear cut market winner](/blog/top-five-cloud-trends/).\n\n### Microservices\n\nThe last main component of cloud native computing is microservices. In order to make apps run more smoothly, they can be broken down into smaller parts, or microservices, to make them easier to scale based on load. Microservices infrastructure also makes it easier – and faster – for engineers to develop an app. 
Smaller teams can be formed and assigned to take ownership of individual components of the app’s development, allowing engineers to code without potentially impacting another part of the project.\n\nWhile public cloud services like AWS offer the opportunity to build and deploy applications easily, there are times when it makes sense to build your own infrastructure. A private or hybrid cloud solution is generally needed when sensitive data is processed within an application or industry regulations call for increased controls and security.\n\n## How to streamline cloud native development\n\nAs you can see, cloud native app development requires the incorporation of several tools for a successful deployment. It begs for a DevOps approach to efficiently streamline the multiple elements needed to get an app up and running in the cloud. This is where GitLab comes in.\n\nWe're aiming to make GitLab the best place to build cloud native apps. With [built-in registry](https://docs.gitlab.com/ee/user/packages/container_registry/index.html) and [Kubernetes integrations](https://docs.gitlab.com/ee/user/project/clusters/index.html) we're always working to offer new ways to simplify toolchains and speed up cycle times, making it easier to transition to a cloud native environment.\n\nFor a deeper dive into cloud native, check out our training video by GitLab Product Marketing Manager [William Chia](/company/team/#thewilliamchia):\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/jc5cY3LoOOI?start=90\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nCover photo by [Sam Schooler](https://unsplash.com/photos/E9aetBe2w40) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[9,859],{"slug":2672,"featured":6,"template":734},"what-is-cloud-native","content:en-us:blog:what-is-cloud-native.yml","What Is Cloud 
Native","en-us/blog/what-is-cloud-native.yml","en-us/blog/what-is-cloud-native",{"_path":2678,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2679,"content":2684,"config":2690,"_id":2692,"_type":14,"title":2693,"_source":16,"_file":2694,"_stem":2695,"_extension":19},"/en-us/blog/what-to-expect-at-predict-2019",{"title":2680,"description":2681,"ogTitle":2680,"ogDescription":2681,"noIndex":6,"ogImage":871,"ogUrl":2682,"ogSiteName":720,"ogType":721,"canonicalUrls":2682,"schema":2683},"2019 cloud native predictions from the Predict 2019 Conference","Break out your sunglasses, because the cloud native forecast for 2019 is sunny.","https://about.gitlab.com/blog/what-to-expect-at-predict-2019","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"2019 cloud native predictions from the Predict 2019 Conference\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tina Sturgis\"}],\n        \"datePublished\": \"2018-12-12\"\n      }",{"title":2680,"description":2681,"authors":2685,"heroImage":871,"date":2687,"body":2688,"category":815,"tags":2689},[2686],"Tina Sturgis","2018-12-12","\n\nGet the latest 2019 predictions from GitLab and other industry experts. [Sign me up](https://predict2019.com/#join-us)!\n{: .alert .alert-info}\n\nI love this time of year!  But it isn't for the reasons you may be thinking ... it's not the holiday decorations, shopping for gifts for loved ones ... it is about PREDICTIONS! Yep, I am a prediction junkie! I love to stop, do a little research as the end of December rolls around, reflect on what happened in that year, and begin to forecast trends I believe will emerge in the new year.\n\nThis year, one of the most exciting areas I wanted to dive into a prediction of is [cloud native](/topics/cloud-native/). It is no longer just a ‘fad,’ enterprises are realizing benefits from adopting cloud native. 
So I got together with my closest GitLab team-members and we dove in to provide you with our top five predictions.\n\n## Top predictions around cloud native\n\nThe basis for cloud native applications to flourish has been set and we believe that 2019 will be a great cloud native year.\n\n* Enterprises will adopt a [multi-cloud strategy](https://medium.com/gitlab-magazine/multi-cloud-maturity-model-2de185c01dd7) for their long-term investments.\n* The cloud native stack is maturing with tools like Kubernetes, Prometheus, and Envoy.\n* We are going to see a lot more on [serverless](/topics/serverless/) with the likes of Lambda and Knative.\n* We will see some real movement in the application of artificial intelligence and machine learning.\n\n## What about DevOps and security predictions?\n\nOnce we completed our research and position on cloud native predictions, we teamed up with [DevOps.com](https://www.devops.com) to participate in their on-demand virtual conference, [Predict 2019](https://predict2019.com/#join-us), that includes predictions around cloud security, DevOps, and quality testing with a [cast of speakers](https://predict2019.com/#speakers) that will educate and inspire you as you move into 2019!\n\n[Sign up now to attend Predict 2019](https://predict2019.com/#join-us)!\n{: .alert .alert-info}\n\nPhoto by [Marc Wieland](https://unsplash.com/photos/zrj-TPjcRLA?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/clouds?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n{: .note}\n",[563,859,9,278],{"slug":2691,"featured":6,"template":734},"what-to-expect-at-predict-2019","content:en-us:blog:what-to-expect-at-predict-2019.yml","What To Expect At Predict 
2019","en-us/blog/what-to-expect-at-predict-2019.yml","en-us/blog/what-to-expect-at-predict-2019",{"_path":2697,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2698,"content":2704,"config":2708,"_id":2710,"_type":14,"title":2711,"_source":16,"_file":2712,"_stem":2713,"_extension":19},"/en-us/blog/why-gitops-should-be-workflow-of-choice",{"title":2699,"description":2700,"ogTitle":2699,"ogDescription":2700,"noIndex":6,"ogImage":2701,"ogUrl":2702,"ogSiteName":720,"ogType":721,"canonicalUrls":2702,"schema":2703},"Why GitOps should be the workflow of choice","What is GitOps and how do you apply it in real-world applications?","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681239/Blog/Hero%20Images/shiro-hatori-WR-ifjFy4CI-unsplash.jpg","https://about.gitlab.com/blog/why-gitops-should-be-workflow-of-choice","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why GitOps should be the workflow of choice\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brendan O'Leary\"}],\n        \"datePublished\": \"2020-04-17\"\n      }",{"title":2699,"description":2700,"authors":2705,"heroImage":2701,"date":1120,"body":2706,"category":752,"tags":2707},[975],"\n\n## How did we get here?\n\nIn 2006, with the launch of AWS Elastic Compute, Amazon set off a revolution in the way we, as developers, consume and use compute and other resources required to deploy and maintain the applications we write. Not long after, infrastructure-as-code started to explode onto the scene with projects like Puppet, Ansible, and Terraform.\n\nAs these technologies matured, it became apparent that scaling applications in a modern or cloud environment required reproducible, reusable components, and infrastructure-as-code became the gold standard for ensuring the proper allocation of resources to an application. At the same time, the infrastructure space and world of software continued to evolve. 
The concept of [continuous delivery](/topics/ci-cd/) and release of software came into vogue and was popularized by large technology companies. The \"book\" on continuous delivery came in 2011, where it became apparent that to move fast enough to keep up with market demands, a radically [faster DevOps](/topics/devops/) cycle was required.\n\nAs continuous delivery for software becomes more commonplace, new solutions in the infrastructure space have been created to keep up. Kubernetes and the rise of [\"serverless\"](/topics/serverless/) promised to once again free developers from the need to worry about infrastructure. In a post-DevOps world - how does one think about infrastructure-as-code and applications as one cohesive unit?  Enter GitOps.\n\n## What is GitOps?\n\n[GitOps](/topics/gitops/) is conceptually not that different from either infrastructure-as-code or continuous delivery. In fact, in many ways, it is the convergence of those two concepts. Developers and operations teams alike can share a common repository of code, and GitOps allows a developer-like experience for managing applications and their underlying infrastructure. In that way, you can use GitOps as an operating model for modern infrastructures like Kubernetes, serverless, and other cloud native technologies.\n\nVersion control and [continuous integration](/solutions/continuous-integration/) are essential tools for deploying software continuously and reliably. GitOps brings both of those software best practices to operations by making the repository the central-source-of-truth for all of the infrastructure required to run applications. With GitOps, any change to infrastructure is committed to the git repository along with any application changes.\n\nThis allows developers and operators to use familiar development patterns and branching strategies. From there, a merge request provides the [central place to collaborate](/topics/gitops/gitops-gitlab-collaboration/) and suggest changes. 
Once merged into the mainline, CI/CD should be configured to deploy both the application and infrastructure changes automatically. The way this enables synchronization between developers and operators is what can be very appealing about GitOps as the next iteration of DevOps.\n\n## Why GitOps?\n\nWhy are so many organizations large and small considering a move to a more GitOps-focused culture?\n\nAs software has eaten the world, business operational excellence has become directly aligned with the ability to deliver quality software faster. Business survival depends on adaptive and efficient software development practices. Those practices require new processes and changes in the way we think about change management.\n\nIn many software practices, the concept of code review and approval is where most of the checks and balances for deploying production code comes into play. At GitLab, we believe that the [merge request](https://docs.gitlab.com/ee/user/project/merge_requests/) is the best place to collaborate on code and approve changes.  Processes and tools that are external to the code change only serve to increase cycle time and inhibit an organization’s ability to deploy code quickly.\n\nOnce an organization has embraced continuous integration and code review as the place for change request approval, it is a natural progression to discuss the idea of continuous delivery to production after those CI gates and human approvals are passed. As GitOps takes that concept a step further and integrates the pipeline to production directly in the git and merge request workflow, it’s become a hot topic and one that will become the normal workflow for efficient software organizations. 
Taking unnecessary steps and tools out of the critical path to production enables an organization to deliver better products faster, without sacrificing the governance required to deploy code.\n\n\n\nCover image by [Shiro Hatori](https://unsplash.com/@shiroscope) on [Unsplash](https://www.unsplash.com)\n{: .note}\n",[109,9,232],{"slug":2709,"featured":6,"template":734},"why-gitops-should-be-workflow-of-choice","content:en-us:blog:why-gitops-should-be-workflow-of-choice.yml","Why Gitops Should Be Workflow Of Choice","en-us/blog/why-gitops-should-be-workflow-of-choice.yml","en-us/blog/why-gitops-should-be-workflow-of-choice",{"_path":2715,"_dir":245,"_draft":6,"_partial":6,"_locale":7,"seo":2716,"content":2722,"config":2728,"_id":2730,"_type":14,"title":2731,"_source":16,"_file":2732,"_stem":2733,"_extension":19},"/en-us/blog/zapier-pick-your-brain-interview",{"title":2717,"description":2718,"ogTitle":2717,"ogDescription":2718,"noIndex":6,"ogImage":2719,"ogUrl":2720,"ogSiteName":720,"ogType":721,"canonicalUrls":2720,"schema":2721},"Scaling communication at Zapier","GitLab CEO Sid Sijbrandij sits down with Zapier team members to chat about communication challenges in each growing company.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749680279/Blog/Hero%20Images/zapier-pyb-post.jpg","https://about.gitlab.com/blog/zapier-pick-your-brain-interview","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Scaling communication at Zapier\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Noah Manger\"}],\n        \"datePublished\": \"2018-01-08\"\n      }",{"title":2717,"description":2718,"authors":2723,"heroImage":2719,"date":2725,"body":2726,"category":2202,"tags":2727},[2724],"Noah Manger","2018-01-08","\n_On November 17, Mike Knoop and Noah Manger of Zapier [sat down with GitLab’s CEO Sid 
Sijbrandij](https://handbook.gitlab.com/handbook/eba/ceo-scheduling/#pick-your-brain-meetings) to discuss the way the two companies approach the challenge of scaling communication as a company grows. This transcript has been lightly edited for clarity._\n\n\u003C!-- more -->\n\n**The heartbeat of our organization is our weekly Friday Update posts that everyone at the company writes. The problem is that as we’ve grown, it’s become a tremendous amount of information. We’re really good at generating the firehose of content, but not as good at consuming it. So I’d love to learn what processes you use at GitLab and if you feel like you’ve got a good grip on this problem?**\n\nWe have a #working-on Slack channel but I’m working on killing it because I just don’t care what people in another part of the company are working on. I just don’t care. There are just too many people at 200 people.\n\nWhat works really well for us are the [functional group updates](https://handbook.gitlab.com/handbook/communication/ask-me-anything/). Every day of the week there’s a presentation (for a maximum of 25 minutes) by a team lead with a slide deck of what they’re working on, and there's an opportunity to ask questions. So you get to stay updated about what all the development teams are doing, what sales is doing, what marketing is doing, what legal is doing, what partnerships is doing. It’s on a three-week rotation so 15 different functional areas and then we start over from the top.\n\n**Do you measure how many people consume this content?**\n\nLive in the call it’s about 50 but it differs on the matter. It’s planned for every single day. We hadn’t scheduled them for a month or two and everyone in the company reported feeling out of touch about where the company was going and what people were working on.\n\nWe are doing asynchronous stand-ups, but it’s just for something that’s a high priority project and there’s a chance of delay that we can’t afford. 
Right now there are three groups on asynchronous stand-ups that are super high-priority projects and we want to make sure that nobody’s blocked. So someone posts a message saying “Asynchronous stand-up for today” and then everyone posts in the thread what they’re working on and may be blocked on.\n\nNormally we don’t do it and we just work in [GitLab issues](https://docs.gitlab.com/ee/user/project/issues/). When you start working on something you assign your name to it, and so if you want to know what someone is working on you see what issues are assigned to them.\n\n**Why did you choose to do functional updates as videos rather than written?**\n\nIt allows for more interaction. Yesterday my update had three topics and I had a slide for each. People ask questions mostly in the chat feed in Zoom. Sometimes if they have an elaborate question I’ll ask them to explain more verbally. We spend 15-20 minutes on people asking questions. That’s what we want — it’s not typical — but that’s where we want to go. Sometimes people have a lot to present and talk for 20 minutes, but we want to try to split those up and constrain the presenting part to 10 minutes.\n\nAnd if they’re over in 10 or 15 minutes and nobody has any questions, that’s great.\n\nAnd one thing: the presentation slides have to be linked to in the invite before the presentation starts. 
People have to be able to invest one minute to click through the presentation to see if they need to join the call, or they can just say “This is great and I don’t need to join.” And obviously everything is recorded: it’s put into our Google Drive and you can see everything and the ones that are able to be public will be posted every Friday.\n\n**Is it typical for someone to join every update?**\n\nI join about two thirds of them.\n\n**So if someone were to join every one it’d be a two-and-a-half-hour time commitment every week?**\n\nYeah, but you won’t be asked any questions so you’re able to multitask and zone out if you don’t need to pay attention.\n\n**You’re global, so how do you deal with time zones? Do you rotate it around so other people are able to join?**\n\nYou’re not expected to join at all. These are optional; join them if you want to. But time zones are the bane of our existence. Most of our people are either in Europe or the Americas, so we do this in our most convenient time zones. So our functional updates are at 8am Pacific and our team call is at 8:30am Pacific. There’s been a trend of scheduling meetings over this but we’re trying to prevent that.\n\n**What percentage of content about the company do people consume over video versus writing?**\n\nIt depends. It depends on how they like to consume content. If you’re good with written content, you can get by with the handbook and the presentations. If you like to consume it by listening and hearing people interact, then the video calls are a good way to do it. What’s important is that we make both ways available and then people can do it as they please.\n\n>If you’re good with written content, you can get by with the handbook and the presentations. If you like to consume it by listening and hearing people interact, then the video calls are a good way to do it. What’s important is that we make both ways available and then people can do it as they please.\n\nAnd some people might not care so much! 
Some people are happy being an open source developer and don’t care about the machinations of a company and that is A-OK. We’re not going to force you to sit through this or check your attendance rate. That is just fine.\n\nBut some people really care and they care about all the aspects. They joined a startup because they want to know what’s happening. For example, when we were doing fundraising we had a fundraising Slack channel and people were asking questions like “What’s the liquidation preference?” And that’s great. If you’re interested we’re not trying to shield you; we don’t want you to get too distracted but it’s there if you want to dive in.\n\n**Do you find people have anxiety around keeping up with information and being concerned they’re not missing things?**\n\nWhat people report is that starting here is overwhelming. The first month is a dark place. We never have people quit during that time but everyone reports that it’s super hard on them. We have one onboarding issue that has about 100 checkboxes you need to check off. And we try to have it all go by what you do on Day 1, Day 2. But it’s very overwhelming. We try to figure out what to cut, but everyone says “No, it’s good to have it all there.” When you first join you have access to the entire map of GitLab so you have to constrain your view.\n\n**Are there other things that you do to help teams know what other teams are doing around the organization?**\n\nWell the [handbook](https://handbook.gitlab.com/handbook/) is really important. That contains all our processes, all the different departments, how they operate, who’s responsible, what Slack channels they’re on, which issue trackers they use; our definitions; our stages in the sales process. Everything should be in there. It’s hard to get right, so it’s a constant focus of my attention. But the idea is if you want to make a change to the company you propose an edit to the handbook, make a merge request, and then if you merge it you announce it. 
It’s the best handbook in the world; there’s lots of room for improvement, but it’s good and lets you see how lots of different parts of the company operate.\n\nAnd of course we use our own tools. So our customer success team uses an [issue board](https://gitlab.com/gitlab-com/customer-success/sa-service-desk/boards/339477?=) so you can see what they’re doing and what stage it’s in. So we try to use our issue boards and our static websites so you can peer into any part of the company.\n\nOne thing we’re still getting better at is how to expose metrics. We already have a good metrics sheet that’s up to date, consuming all the revenue models and everything we have, but I want that to be a real-time thing that looks a bit prettier and has some better graphs.\n\nAnother thing we do to keep everyone posted is everyone gets the investor update. So every single month, between the 10th and 15th, we send out an investor update about what was good, what was bad, and all our core metrics and everyone in the company gets it.\n\n**Do people find that helpful?**\n\nI think people find it helpful. I believe if you want people to invest in the company you have to treat them like investors – which they are, because they have options. I think what people pay attention to most is runway (months of cash remaining) and what’s bad.\n\n**One thing we’ve heard is that people want a weekly set of highlights of the things that they need to know. Do you do anything like that?**\n\nI’ve never heard that. If your communication is any good, you repeat yourself a lot. I have a #ceo Slack channel, so hopefully what I say there is congruent with what I say in the investor update is congruent with what the leaders in marketing and sales are saying, etc. We’re not trying to make it the same message, but in a perfect world it’d be the same message.\n\n>If your communication is any good, you repeat yourself a lot.\n\nSo no, I’ve never heard the need for a summary. 
If I ever need to go find what sales was doing two weeks ago, I’d go find their functional update from two weeks ago.\n\n**If I switched to a new product team and I wanted to know what my new team has been working on, what would I do?**\n\nYou’d look at the functional updates. And also you could join the kickoffs and retrospectives, which happen every month and are broadcast live on [YouTube](https://www.youtube.com/c/Gitlab). So that’s another channel you could use.\n\n**At which stage in your growth did you start doing those functional updates?**\n\nI don’t know exactly. About 50 engineers. But it’s also because this is an open source project and people who are contributing to the project but aren’t part of GitLab are wondering what’s in the pipeline and what’s happening.\n\n**Do you have any internal blog or tools that people log into to get information about what the company is up to?**\n\nNo. There’s the handbook, but for regular updates that’s what the functional team updates and issue boards are for.\n\n**Do you feel like there’s things that aren’t shared that should be? Are those functional updates high enough bandwidth or frequent enough to get everything across?**\n\nWith our kickoffs, because they’re live broadcast, some of our product managers would get into presentation mode, like “Everything’s going to be wonderful!” There’s going to be some of that, but I think it could be more measured and raw. In our retrospectives there’s more of that. People are also used to asking hard questions and getting praised for that. You say things like “Wow, that’s a hard question. That’s the best question we’ve got.”\n\n**If I’m a product manager and I’m about to release something that will affect the product and I don’t have a functional update this week, what’s the best way to do that?**\n\nFor the company, post in the general chat channel that will be consumed by many people and you mention the related people. 
If you need it externally you could do a blog post, but usually you could just do it in the issue and then tweet it from your personal account and it will be retweeted by GitLab.\n\n**So you depend on Slack for urgent notifications?**\n\nSlack is great for urgency. It’s its downfall as well.\n\n**What have been either pain points or surprises as you’ve gone from 100 to 200 people?**\n\nI think a pain point we’re experiencing right now is our team call. It’s too many people. We try to rotate people now, but after about 150 people, people lost track. And if you lose track you lose interest. So we’re thinking about getting a smaller group of people together, maybe even 7-15, and having them talk every day for a sustained period so you get to know them and then you switch up the groups.\n\nAlso, overuse of @channel mentions is a pet peeve. It’s only allowed if it’s urgent *and* important but people use them if it’s *only* urgent or *only* important. Those should just be posted without an @channel or @here mention. If my Slack always has a constant red thing then I’ll stop paying attention. It’s a tragedy of the commons.\n\n**Do you have any tricks for organizing Slack?**\n\nThere’s a few special channels: #thanks where we call people out for helping that gets about 10 posts a day and that’s one of my favorite channels.\n\nThere’s an #emotional channel where you can just complain about shit. And that’s allowed and encouraged and we give teddy bear emojis back.\n\n**How many channels do you have?**\n\nHundreds. More channels than people.\n\n**How do people navigate that when they join? Do you do anything to help them figure out which channels to join?**\n\nIt’s organic. These people already feel overwhelmed, do you want to give them more channels? It just gets worse. 
And in the handbook you can see what the channels are for your group.\n\n**Since we’re talking about cross-team collaboration, can you tell us about your summit?**\n\nWe try to do it every nine months and it’s forbidden to organize functional meetings there. So you can’t meet with just the sales or marketing team. Instead we have an '[unconference](https://en.wikipedia.org/wiki/Unconference)' based on the Lobby Conference, that’s built on user-generated content. We have two half days where people propose subjects, people vote on them, and someone kicks things off for five minutes and then a group of 15-20 people discuss it for 50 minutes.\n\nYou know the people in your team already, so we said “Please, please, please meet with other people.” The top two sessions at the last one were on avoiding burnout and how to keep yourself motivated while working at home. I was glad to see people organized sessions like that because we can do the purely job-related stuff at other times.\n\n**Well thanks. This has been really great and has challenged some of our assumptions. We’ve been assuming that we’re generating all this content and we need to figure out what the right curation layer is. But it sounds like you’ve been very successful at reducing the amount of content that’s generated in the first place but forcing it all to go through those channels, which solves the curation problem that way.**\n\n\n## About the guest author\n\nNoah Manger is a product manager, designer and developer, currently leading the Internal Tools team at Zapier. 
He lives in Portland, Oregon.\n\nCover image by [Alexandr Bormotin](https://unsplash.com/@bormot) on [Unsplash](https://unsplash.com/photos/Hd8b_WtKIck).\n",[9,2204,1792],{"slug":2729,"featured":6,"template":734},"zapier-pick-your-brain-interview","content:en-us:blog:zapier-pick-your-brain-interview.yml","Zapier Pick Your Brain Interview","en-us/blog/zapier-pick-your-brain-interview.yml","en-us/blog/zapier-pick-your-brain-interview",1760988319187]